From a0fb2f513c629e27502b833af7c880dcb3290206 Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Thu, 9 Jan 2025 10:17:50 -0500 Subject: [PATCH 01/18] Bump to latest mimir-prometheus (#10383) * Bump to latest mimir-prometheus Version synced here: https://github.com/grafana/mimir-prometheus/pull/818 * Pull MQE functions tests Fix everything related to https://github.com/prometheus/prometheus/pull/15686 We don't support `double_exponential_smoothing` and `predict_linear`, but `deriv` needs to emit a new annotation * Add CHANGELOG * Add back deriv to test gauntlet * Update to `mimir-prometheus`' main branch * Apply suggestions from code review Co-authored-by: Arthur Silva Sens --------- Co-authored-by: Joshua Hesketh Co-authored-by: Arthur Silva Sens --- CHANGELOG.md | 2 + go.mod | 33 +- go.sum | 86 +- pkg/distributor/otel.go | 4 +- .../otlp/metrics_to_prw_generated.go | 7 +- .../activeseries/active_labels_test.go | 2 +- .../operators/functions/range_vectors.go | 25 +- .../testdata/ours/functions.test | 2 +- .../testdata/upstream/functions.test | 70 +- pkg/util/test/histogram.go | 8 +- vendor/cloud.google.com/go/auth/CHANGES.md | 39 + vendor/cloud.google.com/go/auth/auth.go | 12 +- .../go/auth/credentials/compute.go | 12 +- .../go/auth/credentials/detect.go | 23 +- .../go/auth/credentials/filetypes.go | 6 + .../internal/externalaccount/aws_provider.go | 11 + .../externalaccount/externalaccount.go | 23 +- .../internal/externalaccount/url_provider.go | 5 + .../externalaccountuser.go | 5 + .../go/auth/credentials/internal/gdch/gdch.go | 17 +- .../internal/impersonate/impersonate.go | 10 + .../internal/stsexchange/sts_exchange.go | 6 + .../go/auth/credentials/selfsignedjwt.go | 16 +- .../go/auth/grpctransport/directpath.go | 6 +- .../go/auth/grpctransport/grpctransport.go | 24 +- .../go/auth/httptransport/httptransport.go | 15 + .../go/auth/httptransport/trace.go | 93 -- .../go/auth/httptransport/transport.go | 14 - .../go/auth/internal/compute/compute.go | 3 +- .../go/auth/internal/internal.go | 25 +- .../go/auth/internal/jwt/jwt.go | 4 +- .../go/auth/internal/transport/cba.go | 44 +- .../go/auth/internal/transport/s2a.go | 24 +- .../go/auth/internal/transport/transport.go | 1 + .../go/auth/oauth2adapt/CHANGES.md | 7 + .../go/auth/oauth2adapt/oauth2adapt.go | 9 +- .../cloud.google.com/go/auth/threelegged.go | 14 + .../go/compute/metadata/CHANGES.md | 7 + .../go/compute/metadata/log.go | 149 +++ .../go/compute/metadata/metadata.go | 55 +- .../github.com/fsnotify/fsnotify/.cirrus.yml | 7 +- .../fsnotify/fsnotify/.editorconfig | 12 - .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 3 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 34 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 120 ++- .../fsnotify/fsnotify/backend_fen.go | 324 ++----- .../fsnotify/fsnotify/backend_inotify.go | 594 ++++++------ .../fsnotify/fsnotify/backend_kqueue.go | 747 ++++++++-------- .../fsnotify/fsnotify/backend_other.go | 204 +---- .../fsnotify/fsnotify/backend_windows.go | 305 ++----- .../github.com/fsnotify/fsnotify/fsnotify.go | 368 +++++++- .../fsnotify/fsnotify/internal/darwin.go | 39 + .../fsnotify/internal/debug_darwin.go | 57 ++ .../fsnotify/internal/debug_dragonfly.go | 33 + .../fsnotify/internal/debug_freebsd.go | 42 + .../fsnotify/internal/debug_kqueue.go | 32 + .../fsnotify/fsnotify/internal/debug_linux.go | 56 ++ .../fsnotify/internal/debug_netbsd.go | 25 + .../fsnotify/internal/debug_openbsd.go | 28 + .../fsnotify/internal/debug_solaris.go | 45 + 
.../fsnotify/internal/debug_windows.go | 40 + .../fsnotify/fsnotify/internal/freebsd.go | 31 + .../fsnotify/fsnotify/internal/internal.go | 2 + .../fsnotify/fsnotify/internal/unix.go | 31 + .../fsnotify/fsnotify/internal/unix2.go | 7 + .../fsnotify/fsnotify/internal/windows.go | 41 + vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 259 ------ .../fsnotify/fsnotify/system_bsd.go | 1 - .../fsnotify/fsnotify/system_darwin.go | 1 - .../go-openapi/jsonpointer/pointer.go | 2 +- .../github.com/google/pprof/profile/encode.go | 5 + .../github.com/google/pprof/profile/merge.go | 5 + .../google/pprof/profile/profile.go | 7 +- .../prometheus/model/histogram/test_utils.go | 4 +- .../prometheus/model/textparse/nhcbparse.go | 90 +- .../prometheus/prometheus/promql/engine.go | 20 +- .../prometheus/prometheus/promql/functions.go | 33 +- .../promql/promqltest/testdata/functions.test | 61 +- .../prometheus/prometheus/rules/group.go | 249 +++--- .../prometheus/prometheus/scrape/scrape.go | 8 +- .../prometheus/prometheus/scrape/target.go | 110 +-- .../prometheus/helpers_from_stdlib.go | 106 --- ...rmalize_name.go => metric_name_builder.go} | 262 +++--- .../prometheusremotewrite/metrics_to_prw.go | 7 +- .../storage/remote/write_handler.go | 40 +- .../prometheus/template/template.go | 4 +- .../prometheus/prometheus/tsdb/db.go | 2 +- .../prometheus/prometheus/tsdb/head_append.go | 52 +- .../prometheus/prometheus/tsdb/head_read.go | 15 +- .../prometheus/prometheus/tsdb/head_wal.go | 8 +- .../prometheus/tsdb/index/postings.go | 58 +- .../prometheus/tsdb/record/record.go | 121 ++- .../prometheus/prometheus/tsdb/testutil.go | 50 +- .../prometheus/tsdb/tsdbutil/histogram.go | 50 +- .../prometheus/tsdb/wlog/checkpoint.go | 49 +- .../prometheus/tsdb/wlog/watcher.go | 4 +- .../util/annotations/annotations.go | 8 + .../prometheus/util/testutil/context.go | 2 +- .../prometheus/prometheus/web/api/v1/api.go | 18 +- .../internal/tagencoding/tagencoding.go | 75 -- .../go.opencensus.io/metric/metricdata/doc.go | 19 - .../metric/metricdata/exemplar.go | 38 - .../metric/metricdata/label.go | 35 - .../metric/metricdata/metric.go | 46 - .../metric/metricdata/point.go | 193 ---- .../metric/metricdata/type_string.go | 16 - .../metric/metricdata/unit.go | 27 - .../metric/metricproducer/manager.go | 78 -- .../metric/metricproducer/producer.go | 28 - .../go.opencensus.io/plugin/ocgrpc/client.go | 56 -- .../plugin/ocgrpc/client_metrics.go | 118 --- .../plugin/ocgrpc/client_stats_handler.go | 49 - vendor/go.opencensus.io/plugin/ocgrpc/doc.go | 19 - .../go.opencensus.io/plugin/ocgrpc/server.go | 81 -- .../plugin/ocgrpc/server_metrics.go | 108 --- .../plugin/ocgrpc/server_stats_handler.go | 63 -- .../plugin/ocgrpc/stats_common.go | 248 ----- .../plugin/ocgrpc/trace_common.go | 107 --- .../go.opencensus.io/plugin/ochttp/client.go | 117 --- .../plugin/ochttp/client_stats.go | 143 --- vendor/go.opencensus.io/plugin/ochttp/doc.go | 19 - .../plugin/ochttp/propagation/b3/b3.go | 123 --- .../go.opencensus.io/plugin/ochttp/route.go | 61 -- .../go.opencensus.io/plugin/ochttp/server.go | 455 ---------- .../ochttp/span_annotating_client_trace.go | 169 ---- .../go.opencensus.io/plugin/ochttp/stats.go | 292 ------ .../go.opencensus.io/plugin/ochttp/trace.go | 244 ----- .../plugin/ochttp/wrapped_body.go | 44 - vendor/go.opencensus.io/resource/resource.go | 164 ---- vendor/go.opencensus.io/stats/doc.go | 68 -- .../go.opencensus.io/stats/internal/record.go | 31 - vendor/go.opencensus.io/stats/measure.go | 109 --- 
.../go.opencensus.io/stats/measure_float64.go | 55 -- .../go.opencensus.io/stats/measure_int64.go | 55 -- vendor/go.opencensus.io/stats/record.go | 156 ---- vendor/go.opencensus.io/stats/units.go | 26 - .../stats/view/aggregation.go | 123 --- .../stats/view/aggregation_data.go | 336 ------- .../go.opencensus.io/stats/view/collector.go | 93 -- vendor/go.opencensus.io/stats/view/doc.go | 47 - vendor/go.opencensus.io/stats/view/export.go | 45 - vendor/go.opencensus.io/stats/view/view.go | 221 ----- .../stats/view/view_to_metric.go | 147 --- vendor/go.opencensus.io/stats/view/worker.go | 424 --------- .../stats/view/worker_commands.go | 186 ---- vendor/go.opencensus.io/tag/context.go | 43 - vendor/go.opencensus.io/tag/doc.go | 26 - vendor/go.opencensus.io/tag/key.go | 44 - vendor/go.opencensus.io/tag/map.go | 229 ----- vendor/go.opencensus.io/tag/map_codec.go | 239 ----- vendor/go.opencensus.io/tag/metadata.go | 52 -- vendor/go.opencensus.io/tag/profile_19.go | 32 - vendor/go.opencensus.io/tag/profile_not19.go | 24 - vendor/go.opencensus.io/tag/validate.go | 56 -- .../trace/propagation/propagation.go | 108 --- .../auto/sdk/CONTRIBUTING.md | 27 + vendor/go.opentelemetry.io/auto/sdk/LICENSE | 201 +++++ .../auto/sdk/VERSIONING.md | 15 + vendor/go.opentelemetry.io/auto/sdk/doc.go | 14 + .../auto/sdk/internal/telemetry/attr.go | 58 ++ .../auto/sdk/internal/telemetry/doc.go | 8 + .../auto/sdk/internal/telemetry/id.go | 103 +++ .../auto/sdk/internal/telemetry/number.go | 67 ++ .../auto/sdk/internal/telemetry/resource.go | 66 ++ .../auto/sdk/internal/telemetry/scope.go | 67 ++ .../auto/sdk/internal/telemetry/span.go | 456 ++++++++++ .../auto/sdk/internal/telemetry/status.go | 40 + .../auto/sdk/internal/telemetry/traces.go | 189 ++++ .../auto/sdk/internal/telemetry/value.go | 452 ++++++++++ vendor/go.opentelemetry.io/auto/sdk/limit.go | 94 ++ vendor/go.opentelemetry.io/auto/sdk/span.go | 432 +++++++++ vendor/go.opentelemetry.io/auto/sdk/tracer.go | 124 +++ .../auto/sdk/tracer_provider.go | 33 + .../http/httptrace/otelhttptrace/version.go | 2 +- .../net/http/otelhttp/client.go | 6 +- .../net/http/otelhttp/internal/semconv/env.go | 50 +- .../otelhttp/internal/semconv/httpconv.go | 34 +- .../http/otelhttp/internal/semconv/util.go | 4 +- .../http/otelhttp/internal/semconv/v1.20.0.go | 32 +- .../net/http/otelhttp/version.go | 2 +- vendor/go.opentelemetry.io/otel/.golangci.yml | 18 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 32 +- vendor/go.opentelemetry.io/otel/Makefile | 14 +- vendor/go.opentelemetry.io/otel/VERSIONING.md | 2 +- .../otel/baggage/baggage.go | 4 +- .../go.opentelemetry.io/otel/codes/codes.go | 3 +- .../otel/internal/attribute/attribute.go | 44 +- .../otel/internal/global/trace.go | 25 + .../go.opentelemetry.io/otel/trace/config.go | 2 +- .../otel/trace/noop/README.md | 3 + .../otel/trace/noop/noop.go | 112 +++ vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 8 +- .../api/googleapi/googleapi.go | 10 + .../iamcredentials/v1/iamcredentials-gen.go | 51 +- .../google.golang.org/api/internal/creds.go | 45 +- .../api/internal/gensupport/media.go | 3 + .../api/internal/gensupport/resumable.go | 2 +- .../api/internal/gensupport/send.go | 17 + .../api/internal/settings.go | 3 + .../google.golang.org/api/internal/version.go | 2 +- .../option/internaloption/internaloption.go | 59 ++ .../api/storage/v1/storage-api.json | 96 +- .../api/storage/v1/storage-gen.go | 846 ++++++++++++------ .../api/transport/grpc/dial.go | 53 +- 
.../api/transport/http/dial.go | 17 +- .../http/internal/propagation/http.go | 87 -- .../googleapis/api/annotations/client.pb.go | 707 +++++++++------ vendor/modules.txt | 69 +- 210 files changed, 7459 insertions(+), 9520 deletions(-) delete mode 100644 vendor/cloud.google.com/go/auth/httptransport/trace.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/log.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/internal.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix2.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh delete mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go rename vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/{normalize_name.go => metric_name_builder.go} (54%) delete mode 100644 vendor/go.opencensus.io/internal/tagencoding/tagencoding.go delete mode 100644 vendor/go.opencensus.io/metric/metricdata/doc.go delete mode 100644 vendor/go.opencensus.io/metric/metricdata/exemplar.go delete mode 100644 vendor/go.opencensus.io/metric/metricdata/label.go delete mode 100644 vendor/go.opencensus.io/metric/metricdata/metric.go delete mode 100644 vendor/go.opencensus.io/metric/metricdata/point.go delete mode 100644 vendor/go.opencensus.io/metric/metricdata/type_string.go delete mode 100644 vendor/go.opencensus.io/metric/metricdata/unit.go delete mode 100644 vendor/go.opencensus.io/metric/metricproducer/manager.go delete mode 100644 vendor/go.opencensus.io/metric/metricproducer/producer.go delete mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client.go delete mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go delete mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go delete mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/doc.go delete mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server.go delete mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go delete mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go delete mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go delete mode 100644 vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/client.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/client_stats.go delete mode 
100644 vendor/go.opencensus.io/plugin/ochttp/doc.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/route.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/server.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/stats.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/trace.go delete mode 100644 vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go delete mode 100644 vendor/go.opencensus.io/resource/resource.go delete mode 100644 vendor/go.opencensus.io/stats/doc.go delete mode 100644 vendor/go.opencensus.io/stats/internal/record.go delete mode 100644 vendor/go.opencensus.io/stats/measure.go delete mode 100644 vendor/go.opencensus.io/stats/measure_float64.go delete mode 100644 vendor/go.opencensus.io/stats/measure_int64.go delete mode 100644 vendor/go.opencensus.io/stats/record.go delete mode 100644 vendor/go.opencensus.io/stats/units.go delete mode 100644 vendor/go.opencensus.io/stats/view/aggregation.go delete mode 100644 vendor/go.opencensus.io/stats/view/aggregation_data.go delete mode 100644 vendor/go.opencensus.io/stats/view/collector.go delete mode 100644 vendor/go.opencensus.io/stats/view/doc.go delete mode 100644 vendor/go.opencensus.io/stats/view/export.go delete mode 100644 vendor/go.opencensus.io/stats/view/view.go delete mode 100644 vendor/go.opencensus.io/stats/view/view_to_metric.go delete mode 100644 vendor/go.opencensus.io/stats/view/worker.go delete mode 100644 vendor/go.opencensus.io/stats/view/worker_commands.go delete mode 100644 vendor/go.opencensus.io/tag/context.go delete mode 100644 vendor/go.opencensus.io/tag/doc.go delete mode 100644 vendor/go.opencensus.io/tag/key.go delete mode 100644 vendor/go.opencensus.io/tag/map.go delete mode 100644 vendor/go.opencensus.io/tag/map_codec.go delete mode 100644 vendor/go.opencensus.io/tag/metadata.go delete mode 100644 vendor/go.opencensus.io/tag/profile_19.go delete mode 100644 vendor/go.opencensus.io/tag/profile_not19.go delete mode 100644 vendor/go.opencensus.io/tag/validate.go delete mode 100644 vendor/go.opencensus.io/trace/propagation/propagation.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md create mode 100644 vendor/go.opentelemetry.io/auto/sdk/LICENSE create mode 100644 vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md create mode 100644 vendor/go.opentelemetry.io/auto/sdk/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/limit.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer.go create 
mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/README.md create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/noop.go delete mode 100644 vendor/google.golang.org/api/transport/http/internal/propagation/http.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 6dbcf453568..cb9ed7c2e96 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,8 @@ * [BUGFIX] PromQL: Fix various UTF-8 bugs related to quoting. https://github.com/prometheus/prometheus/pull/15531 #10258 * [BUGFIX] Ruler: Fixed an issue when using the experimental `-ruler.max-independent-rule-evaluation-concurrency` feature, where if a rule group was eligible for concurrency, it would flap between running concurrently or not based on the time it took after running concurrently. #9726 #10189 * [BUGFIX] Mimirtool: `remote-read` commands will now return data. #10286 +* [BUGFIX] PromQL: Fix deriv, predict_linear and double_exponential_smoothing with histograms https://github.com/prometheus/prometheus/pull/15686 #10383 +* [BUGFIX] MQE: Fix deriv with histograms #10383 ### Mixin diff --git a/go.mod b/go.mod index dba493af1e5..ff34ae71072 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( golang.org/x/net v0.33.0 golang.org/x/sync v0.10.0 golang.org/x/time v0.8.0 - google.golang.org/grpc v1.68.1 + google.golang.org/grpc v1.69.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -85,18 +85,18 @@ require ( github.com/twmb/franz-go/plugin/kprom v1.1.0 github.com/xlab/treeprint v1.2.0 go.opentelemetry.io/collector/pdata v1.22.0 - go.opentelemetry.io/otel v1.32.0 - go.opentelemetry.io/otel/trace v1.32.0 + go.opentelemetry.io/otel v1.33.0 + go.opentelemetry.io/otel/trace v1.33.0 go.uber.org/multierr v1.11.0 golang.org/x/term v0.27.0 - google.golang.org/api v0.209.0 + google.golang.org/api v0.213.0 google.golang.org/protobuf v1.36.1 sigs.k8s.io/kustomize/kyaml v0.18.1 ) require ( - cloud.google.com/go/auth v0.10.2 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect + cloud.google.com/go/auth v0.13.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect @@ -138,8 +138,9 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/mail.v2 v2.3.1 // indirect @@ -151,7 +152,7 @@ require ( require ( cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect cloud.google.com/go/iam v1.2.2 // indirect github.com/DmitriyVTitov/size v1.5.0 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect @@ -186,14 +187,14 @@ require ( github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/fatih/color v1.16.0 // 
indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.22.2 // indirect github.com/go-openapi/errors v0.22.0 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/loads v0.21.5 // indirect github.com/go-openapi/runtime v0.27.1 // indirect @@ -204,7 +205,7 @@ require ( github.com/gogo/googleapis v1.4.1 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.14.0 // indirect @@ -250,7 +251,7 @@ require ( github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/exporter-toolkit v0.13.1 // indirect + github.com/prometheus/exporter-toolkit v0.13.2 // indirect github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be // indirect github.com/rs/cors v1.11.0 // indirect github.com/rs/xid v1.6.0 // indirect @@ -269,8 +270,8 @@ require ( go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/semconv v0.116.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect golang.org/x/mod v0.22.0 // indirect @@ -279,14 +280,14 @@ require ( golang.org/x/text v0.21.0 // indirect golang.org/x/tools v0.28.0 // indirect google.golang.org/genproto v0.0.0-20241113202542-65e8d215514f // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) -replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20250102152619-93fa7617c041 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203 // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index 973ab8a936b..de3ca81db05 100644 --- a/go.sum +++ b/go.sum @@ -117,10 +117,10 @@ cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEar cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads 
v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= -cloud.google.com/go/auth v0.10.2 h1:oKF7rgBfSHdp/kuhXtqU/tNDr0mZqhYbEh+6SiqzkKo= -cloud.google.com/go/auth v0.10.2/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= -cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -218,8 +218,8 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -958,14 +958,14 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.131.0 h1:0WHymufAV5avpodT0h5/pucUVfO4v7biquOIqhLeROY= -github.com/digitalocean/godo v1.131.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= +github.com/digitalocean/godo v1.132.0 h1:n0x6+ZkwbyQBtIU1wwBhv26EINqHg0wWQiBXlwYg/HQ= +github.com/digitalocean/godo v1.132.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo= github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -github.com/docker/docker 
v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= +github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -1017,8 +1017,8 @@ github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzP github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY= github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= @@ -1061,8 +1061,8 @@ github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= @@ -1221,8 +1221,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g= -github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod 
h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= @@ -1283,8 +1283,8 @@ github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 h1:1TeKhyS+pvzO github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40/go.mod h1:IGRj8oOoxwJbHBYl1+OhS9UjQR0dv6SQOep7HqmtyFU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20250102152619-93fa7617c041 h1:tZFQRbiyOW630aJ7r+p+N3kUWhMVeWLRGSjZsK9KA0s= -github.com/grafana/mimir-prometheus v0.0.0-20250102152619-93fa7617c041/go.mod h1:a5LEa2Vy87wOp0Vu6sLmEIR1V59fqH3QosOSiErAr30= +github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203 h1:gCU3GO2mZUzsLAa/JRRDJpKbYhkXy7caWnzfNqbgDig= +github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203/go.mod h1:KfyZCeyGxf5gvl6VZbrQsd400nJjGw+ygMEtDVZKIT4= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/prometheus-alertmanager v0.25.1-0.20240930132144-b5e64e81e8d3 h1:6D2gGAwyQBElSrp3E+9lSr7k8gLuP3Aiy20rweLWeBw= @@ -1361,8 +1361,8 @@ github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31 github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w= -github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= +github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A= +github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= @@ -1375,8 +1375,8 @@ github.com/hashicorp/vault/api/auth/kubernetes v0.8.0 h1:6jPcORq7OHwf+MCbaaUmiBv github.com/hashicorp/vault/api/auth/kubernetes v0.8.0/go.mod h1:nfl5sRUUork0ZSfV3xf+pgAFQSD5kSkL0k9axg523DM= github.com/hashicorp/vault/api/auth/userpass v0.8.0 h1:JFFzMld+VO/S1v8HQNJzcy+3o+xfx/iH49dsiQ1G5jk= github.com/hashicorp/vault/api/auth/userpass v0.8.0/go.mod h1:+XbsSnbbyo+yjySfKcIsyl28kO4C/c4Czo7og0XCtUo= -github.com/hetznercloud/hcloud-go/v2 v2.17.0 h1:ge0w2piey9SV6XGyU/wQ6HBR24QyMbJ3wLzezplqR68= -github.com/hetznercloud/hcloud-go/v2 v2.17.0/go.mod h1:zfyZ4Orx+mPpYDzWAxXR7DHGL50nnlZ5Edzgs1o6f/s= +github.com/hetznercloud/hcloud-go/v2 v2.17.1 h1:DPi019dv0WCiECEmtcuTgc//hBvnxESb6QlJnAb4a04= +github.com/hetznercloud/hcloud-go/v2 v2.17.1/go.mod 
h1:6ygmBba+FdawR2lLp/d9uJljY2k0dTYthprrI8usdLw= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -1619,8 +1619,8 @@ github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFS github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04= -github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0= +github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ= +github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -1640,8 +1640,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= @@ -1783,26 +1783,28 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI= go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y= go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= 
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 h1:7F3XCD6WYzDkwbi8I8N+oYJWquPVScnRosKGgqjsR8c= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0/go.mod h1:Dk3C0BfIlZDZ5c6eVS7TYiH2vssuyUU3vUsgbrR+5V4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 h1:xwH3QJv6zL4u+gkPUu59NeT1Gyw9nScWT8FQpKLUJJI= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0/go.mod h1:uosvgpqTcTXtcPQORTbEkZNDQTCDOgTz1fe6aLSyqrQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2344,8 +2346,8 @@ google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZ google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.209.0 h1:Ja2OXNlyRlWCWu8o+GgI4yUn/wz9h/5ZfFbKz+dQX+w= -google.golang.org/api v0.209.0/go.mod h1:I53S168Yr/PNDNMi5yPnDc0/LGRZO6o7PoEbl/HY3CM= +google.golang.org/api v0.213.0 
h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ= +google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -2508,8 +2510,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go. google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o= +google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= diff --git a/pkg/distributor/otel.go b/pkg/distributor/otel.go index 0c3570913e1..5318ea520e1 100644 --- a/pkg/distributor/otel.go +++ b/pkg/distributor/otel.go @@ -401,8 +401,8 @@ func otelMetricsToMetadata(addSuffixes bool, md pmetric.Metrics) []*mimirpb.Metr metric := scopeMetrics.Metrics().At(k) entry := mimirpb.MetricMetadata{ Type: otelMetricTypeToMimirMetricType(metric), - // TODO(krajorama): when UTF-8 is configurable from user limits, replace "false" appropriately. - MetricFamilyName: prometheustranslator.BuildCompliantName(metric, "", addSuffixes, false), + // TODO(krajorama): when UTF-8 is configurable from user limits, use BuildMetricName. 
See https://github.com/prometheus/prometheus/pull/15664 + MetricFamilyName: prometheustranslator.BuildCompliantMetricName(metric, "", addSuffixes), Help: metric.Description(), Unit: metric.Unit(), } diff --git a/pkg/distributor/otlp/metrics_to_prw_generated.go b/pkg/distributor/otlp/metrics_to_prw_generated.go index e9fdbbc8964..881789a6c0b 100644 --- a/pkg/distributor/otlp/metrics_to_prw_generated.go +++ b/pkg/distributor/otlp/metrics_to_prw_generated.go @@ -114,7 +114,12 @@ func (c *MimirConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, se continue } - promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) + var promName string + if settings.AllowUTF8 { + promName = prometheustranslator.BuildMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + } else { + promName = prometheustranslator.BuildCompliantMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + } c.metadata = append(c.metadata, mimirpb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), MetricFamilyName: promName, diff --git a/pkg/ingester/activeseries/active_labels_test.go b/pkg/ingester/activeseries/active_labels_test.go index aa7f928d7dd..30ab5321813 100644 --- a/pkg/ingester/activeseries/active_labels_test.go +++ b/pkg/ingester/activeseries/active_labels_test.go @@ -23,7 +23,7 @@ func (m *mockPostingsReader) Postings(ctx context.Context, name string, values . valuePostings := make([]index.Postings, 0, len(values)) for _, value := range values { - valuePostings = append(valuePostings, m.postings.Get(name, value)) + valuePostings = append(valuePostings, m.postings.Postings(ctx, name, value)) } return index.Merge(ctx, valuePostings...), nil diff --git a/pkg/streamingpromql/operators/functions/range_vectors.go b/pkg/streamingpromql/operators/functions/range_vectors.go index 4593a6694a9..f1270146064 100644 --- a/pkg/streamingpromql/operators/functions/range_vectors.go +++ b/pkg/streamingpromql/operators/functions/range_vectors.go @@ -468,18 +468,31 @@ func resetsChanges(isReset bool) RangeVectorStepFunction { } var Deriv = FunctionOverRangeVectorDefinition{ - SeriesMetadataFunction: DropSeriesName, - StepFunc: deriv, + SeriesMetadataFunction: DropSeriesName, + StepFunc: deriv, + NeedsSeriesNamesForAnnotations: true, } -func deriv(step *types.RangeVectorStepData, _ float64, _ types.EmitAnnotationFunc) (float64, bool, *histogram.FloatHistogram, error) { - head, tail := step.Floats.UnsafePoints() +func deriv(step *types.RangeVectorStepData, _ float64, emitAnnotation types.EmitAnnotationFunc) (float64, bool, *histogram.FloatHistogram, error) { + fHead, fTail := step.Floats.UnsafePoints() + hHead, hTail := step.Histograms.UnsafePoints() - if (len(head) + len(tail)) < 2 { + haveHistograms := len(hHead) > 0 || len(hTail) > 0 + + if len(fHead)+len(fTail) == 1 && haveHistograms { + emitAnnotation(annotations.NewHistogramIgnoredInMixedRangeInfo) + return 0, false, nil, nil + } + + if (len(fHead) + len(fTail)) < 2 { return 0, false, nil, nil } - slope, _ := linearRegression(head, tail, head[0].T) + slope, _ := linearRegression(fHead, fTail, fHead[0].T) + + if haveHistograms { + emitAnnotation(annotations.NewHistogramIgnoredInMixedRangeInfo) + } return slope, true, nil, nil } diff --git a/pkg/streamingpromql/testdata/ours/functions.test b/pkg/streamingpromql/testdata/ours/functions.test index e7cb57c3562..9105bc01848 100644 --- a/pkg/streamingpromql/testdata/ours/functions.test +++ 
b/pkg/streamingpromql/testdata/ours/functions.test @@ -544,7 +544,7 @@ load 1m metric{case="nhcb"} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[7 8]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[0 0]}} metric{case="floats, nh and nhcb"} 0 1 2 3 2 1 0 _ {{schema:3 sum:0 count:2 buckets:[1 2 1]}} {{schema:3 sum:0 count:1 buckets:[1 2 1]}} {{schema:3 sum:0 count:1 buckets:[1 0 1]}} {{schema:-53 sum:1 count:5 custom_values:[5 10] buckets:[1 4]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[7 8]}} {{schema:-53 sum:3 count:15 custom_values:[5 10] buckets:[0 0]}} -eval range from 0 to 20m step 1m deriv(metric[3m1s]) +eval_info range from 0 to 20m step 1m deriv(metric[3m1s]) {case="all Inf"} _ NaN NaN NaN NaN NaN NaN NaN NaN NaN {case="all NaN"} _ NaN NaN NaN NaN NaN NaN NaN NaN NaN {case="all floats 1"} _ 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 0.016666666666666666 diff --git a/pkg/streamingpromql/testdata/upstream/functions.test b/pkg/streamingpromql/testdata/upstream/functions.test index 9e8edb1bdf0..004ef8d1995 100644 --- a/pkg/streamingpromql/testdata/upstream/functions.test +++ b/pkg/streamingpromql/testdata/upstream/functions.test @@ -239,12 +239,29 @@ clear load 5m http_requests{path="/foo"} 0 50 100 150 200 http_requests{path="/bar"} 200 150 100 50 0 + http_requests_gauge{path="/foo"} {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}}+{{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}}x5 + http_requests_counter{path="/foo"} {{schema:0 sum:0 count:0 buckets:[0 0 0]}}+{{schema:0 sum:1 count:2 buckets:[1 1 1]}}x5 + http_requests_mix{path="/foo"} 0 50 100 {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}} {{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}} # Unsupported by streaming engine. # eval instant at 20m delta(http_requests[20m]) # {path="/foo"} 200 # {path="/bar"} -200 +# Unsupported by streaming engine. +# eval instant at 20m delta(http_requests_gauge[20m]) +# {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} + +# Unsupported by streaming engine. +# # delta emits warn annotation for non-gauge histogram types. +# eval_warn instant at 20m delta(http_requests_counter[20m]) +# {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} + +# Unsupported by streaming engine. +# # delta emits warn annotation for mix of histogram and floats. +# eval_warn instant at 20m delta(http_requests_mix[20m]) +# #empty + clear # Tests for idelta(). @@ -264,7 +281,8 @@ load 5m http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 testcounter_reset_middle_mix 0+10x4 0+10x5 {{schema:0 sum:1 count:1}} {{schema:1 sum:2 count:2}} http_requests_mix{job="app-server", instance="1", group="canary"} 0+80x10 {{schema:0 sum:1 count:1}} - http_requests_histogram{job="app-server", instance="1", group="canary"} {{schema:0 sum:1 count:2}}x10 + http_requests_histogram{job="app-server", instance="1", group="canary"} {{schema:0 sum:1 count:2}}x10 + http_requests_inf{job="app-server", instance="1", group="canary"} -Inf 0+80x10 Inf # deriv should return the same as rate in simple cases. 
 eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m])
@@ -277,15 +295,20 @@ eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job=
 eval instant at 50m deriv(testcounter_reset_middle_total[100m])
   {} 0.010606060606060607
 
-# deriv should ignore histograms.
-eval instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m])
+# deriv should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation.
+eval_info instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m])
   {group="canary", instance="1", job="app-server"} 0.26666666666666666
 
-eval instant at 100m deriv(testcounter_reset_middle_mix[110m])
+eval_info instant at 100m deriv(testcounter_reset_middle_mix[110m])
   {} 0.010606060606060607
 
+# deriv should silently ignore ranges consisting only of histograms.
 eval instant at 50m deriv(http_requests_histogram[60m])
-  #empty
+  #empty
+
+# deriv should return NaN if +Inf or -Inf is found.
+eval instant at 100m deriv(http_requests_inf[100m])
+  {job="app-server", instance="1", group="canary"} NaN
 
 # predict_linear should return correct result.
 # X/s = [ 0, 300, 600, 900,1200,1500,1800,2100,2400,2700,3000]
@@ -328,6 +351,25 @@ eval instant at 50m deriv(http_requests_histogram[60m])
 # eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600)
 #   {} 89.54545454545455
 
+# Unsupported by streaming engine.
+# # predict_linear should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation.
+# eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000)
+#   {} 70
+
+# Unsupported by streaming engine.
+# eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m)
+#   {} 70
+
+# Unsupported by streaming engine.
+# # predict_linear should silently ignore ranges consisting only of histograms.
+# eval instant at 60m predict_linear(http_requests_histogram[60m], 50m)
+#   #empty
+
+# Unsupported by streaming engine.
+# # predict_linear should return NaN if +Inf or -Inf is found.
+# eval instant at 100m predict_linear(http_requests_inf[100m], 6000)
+#   {job="app-server", instance="1", group="canary"} NaN
+
 # With http_requests_total, there is a sample value exactly at the end of
 # the range, and it has exactly the predicted value, so predict_linear
 # can be emulated with deriv.
@@ -758,6 +800,11 @@ load 10s
   http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000
   http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000
   http_requests{job="api-server", instance="1", group="canary"} 0+40x2000
+  http_requests_mix{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 {{schema:0 count:1 sum:2}}x1000
+  http_requests_mix{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 {{schema:0 count:1 sum:2}}x1000
+  http_requests_mix{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 {{schema:0 count:1 sum:2}}x1000
+  http_requests_mix{job="api-server", instance="1", group="canary"} 0+40x2000 {{schema:0 count:1 sum:2}}x1000
+  http_requests_histogram{job="api-server", instance="1", group="canary"} {{schema:0 count:1 sum:2}}x1000
 # Unsupported by streaming engine.
# eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) @@ -766,6 +813,19 @@ load 10s # {job="api-server", instance="0", group="canary"} 24000 # {job="api-server", instance="1", group="canary"} 32000 +# Unsupported by streaming engine. +# # double_exponential_smoothing should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. +# eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1) +# {job="api-server", instance="0", group="production"} 30100 +# {job="api-server", instance="1", group="production"} 30200 +# {job="api-server", instance="0", group="canary"} 80300 +# {job="api-server", instance="1", group="canary"} 80000 + +# Unsupported by streaming engine. +# # double_exponential_smoothing should silently ignore ranges consisting only of histograms. +# eval instant at 10000s double_exponential_smoothing(http_requests_histogram[1m], 0.01, 0.1) +# #empty + # negative trends clear load 10s diff --git a/pkg/util/test/histogram.go b/pkg/util/test/histogram.go index cb6e73a6420..180c4385145 100644 --- a/pkg/util/test/histogram.go +++ b/pkg/util/test/histogram.go @@ -18,19 +18,19 @@ func GenerateTestFloatHistograms(i int) []*histogram.FloatHistogram { } func GenerateTestHistogram(i int) *histogram.Histogram { - return tsdbutil.GenerateTestHistogram(i) + return tsdbutil.GenerateTestHistogram(int64(i)) } func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { - return tsdbutil.GenerateTestFloatHistogram(i) + return tsdbutil.GenerateTestFloatHistogram(int64(i)) } func GenerateTestGaugeHistogram(i int) *histogram.Histogram { - return tsdbutil.GenerateTestGaugeHistogram(i) + return tsdbutil.GenerateTestGaugeHistogram(int64(i)) } func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram { - return tsdbutil.GenerateTestGaugeFloatHistogram(i) + return tsdbutil.GenerateTestGaugeFloatHistogram(int64(i)) } // explicit decoded version of GenerateTestHistogram and GenerateTestFloatHistogram diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index a754df909f6..39a47c85eb2 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,44 @@ # Changelog +## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13) + + +### Features + +* **auth:** Add logging support ([#11079](https://github.com/googleapis/google-cloud-go/issues/11079)) ([c80e31d](https://github.com/googleapis/google-cloud-go/commit/c80e31df5ecb33a810be3dfb9d9e27ac531aa91d)) +* **auth:** Pass logger from auth layer to metadata package ([#11288](https://github.com/googleapis/google-cloud-go/issues/11288)) ([b552efd](https://github.com/googleapis/google-cloud-go/commit/b552efd6ab34e5dfded18438e0fbfd925805614f)) + + +### Bug Fixes + +* **auth:** Check compute cred type before non-default flag for DP ([#11255](https://github.com/googleapis/google-cloud-go/issues/11255)) ([4347ca1](https://github.com/googleapis/google-cloud-go/commit/4347ca141892be8ae813399b4b437662a103bc90)) + +## [0.12.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.0...auth/v0.12.1) (2024-12-10) + + +### Bug Fixes + +* **auth:** Correct typo in link ([#11160](https://github.com/googleapis/google-cloud-go/issues/11160)) ([af6fb46](https://github.com/googleapis/google-cloud-go/commit/af6fb46d7cd694ddbe8c9d63bc4cdcd62b9fb2c1)) + +## 
[0.12.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.11.0...auth/v0.12.0) (2024-12-04) + + +### Features + +* **auth:** Add support for providing custom certificate URL ([#11006](https://github.com/googleapis/google-cloud-go/issues/11006)) ([ebf3657](https://github.com/googleapis/google-cloud-go/commit/ebf36579724afb375d3974cf1da38f703e3b7dbc)), refs [#11005](https://github.com/googleapis/google-cloud-go/issues/11005) + + +### Bug Fixes + +* **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) [#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188) + +## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21) + + +### Features + +* **auth:** Add universe domain support to mTLS ([#11159](https://github.com/googleapis/google-cloud-go/issues/11159)) ([117748b](https://github.com/googleapis/google-cloud-go/commit/117748ba1cfd4ae62a6a4feb7e30951cb2bc9344)) + ## [0.10.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.1...auth/v0.10.2) (2024-11-12) diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index a7fa84f6f95..cd5e9886848 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -24,6 +24,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "strings" @@ -32,6 +33,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/jwt" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -490,6 +492,11 @@ type Options2LO struct { // UseIDToken requests that the token returned be an ID token if one is // returned from the server. Optional. UseIDToken bool + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. 
+ Logger *slog.Logger } func (o *Options2LO) client() *http.Client { @@ -520,12 +527,13 @@ func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) { if err := opts.validate(); err != nil { return nil, err } - return tokenProvider2LO{opts: opts, Client: opts.client()}, nil + return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil } type tokenProvider2LO struct { opts *Options2LO Client *http.Client + logger *slog.Logger } func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { @@ -560,10 +568,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) resp, body, err := internal.DoRequest(tp.Client, req) if err != nil { return nil, fmt.Errorf("auth: cannot fetch token: %w", err) } + tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go index 6f70fa353b0..8afd0472eaa 100644 --- a/vendor/cloud.google.com/go/auth/credentials/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/compute.go @@ -37,8 +37,11 @@ var ( // computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that // uses the metadata service to retrieve tokens. -func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { - return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{ +func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider { + return auth.NewCachedTokenProvider(&computeProvider{ + scopes: opts.Scopes, + client: client, + }, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, DisableAsyncRefresh: opts.DisableAsyncRefresh, }) @@ -47,6 +50,7 @@ func computeTokenProvider(opts *DetectOptions) auth.TokenProvider { // computeProvider fetches tokens from the google cloud metadata service. 
type computeProvider struct { scopes []string + client *metadata.Client } type metadataTokenResp struct { @@ -55,7 +59,7 @@ type metadataTokenResp struct { TokenType string `json:"token_type"` } -func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { +func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) { tokenURI, err := url.Parse(computeTokenURI) if err != nil { return nil, err @@ -65,7 +69,7 @@ func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) { v.Set("scopes", strings.Join(cs.scopes, ",")) tokenURI.RawQuery = v.Encode() } - tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String()) + tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String()) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index 010afc37c8f..a1b5a931884 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -19,6 +19,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "os" "time" @@ -27,6 +28,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/compute/metadata" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -96,12 +98,17 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { } if OnGCE() { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: opts.logger(), + }) return auth.NewCredentials(&auth.CredentialsOptions{ - TokenProvider: computeTokenProvider(opts), + TokenProvider: computeTokenProvider(opts, metadataClient), ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { - return metadata.ProjectIDWithContext(ctx) + return metadataClient.ProjectIDWithContext(ctx) }), - UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, + UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{ + MetadataClient: metadataClient, + }, }), nil } @@ -158,6 +165,11 @@ type DetectOptions struct { // The default value is "googleapis.com". This option is ignored for // authentication flows that do not support universe domain. Optional. UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *DetectOptions) validate() error { @@ -193,6 +205,10 @@ func (o *DetectOptions) client() *http.Client { return internal.DefaultClient() } +func (o *DetectOptions) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { b, err := os.ReadFile(filename) if err != nil { @@ -253,6 +269,7 @@ func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO { AuthURL: c.AuthURI, TokenURL: c.TokenURI, Client: opts.client(), + Logger: opts.logger(), EarlyTokenExpiry: opts.EarlyTokenRefresh, AuthHandlerOpts: handleOpts, // TODO(codyoss): refactor this out. 
We need to add in auto-detection diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index 6591b181132..e5243e6cfbe 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -141,6 +141,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) TokenURL: f.TokenURL, Subject: opts.Subject, Client: opts.client(), + Logger: opts.logger(), } if opts2LO.TokenURL == "" { opts2LO.TokenURL = jwtTokenURL @@ -159,6 +160,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) EarlyTokenExpiry: opts.EarlyTokenRefresh, RefreshToken: f.RefreshToken, Client: opts.client(), + Logger: opts.logger(), } return auth.New3LOTokenProvider(opts3LO) } @@ -177,6 +179,7 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions Scopes: opts.scopes(), WorkforcePoolUserProject: f.WorkforcePoolUserProject, Client: opts.client(), + Logger: opts.logger(), IsDefaultClient: opts.Client == nil, } if f.ServiceAccountImpersonation != nil { @@ -195,6 +198,7 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU ClientSecret: f.ClientSecret, Scopes: opts.scopes(), Client: opts.client(), + Logger: opts.logger(), } return externalaccountuser.NewTokenProvider(externalOpts) } @@ -214,6 +218,7 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil Tp: tp, Delegates: f.Delegates, Client: opts.client(), + Logger: opts.logger(), }) } @@ -221,5 +226,6 @@ func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectO return gdch.NewTokenProvider(f, &gdch.Options{ STSAudience: opts.STSAudience, Client: opts.client(), + Logger: opts.logger(), }) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index d8b5d4fdeb9..9ecd1f64bd5 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -23,6 +23,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "os" @@ -32,6 +33,7 @@ import ( "time" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) var ( @@ -87,6 +89,7 @@ type awsSubjectProvider struct { reqOpts *RequestOptions Client *http.Client + logger *slog.Logger } func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) { @@ -192,10 +195,12 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e } req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL) + sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body) } @@ -225,10 +230,12 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string] for name, value := range headers { req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := 
internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body) } @@ -283,10 +290,12 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context for name, value := range headers { req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return result, err } + sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body) } @@ -308,10 +317,12 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m req.Header.Add(name, value) } + sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", err } + sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body)) if resp.StatusCode != http.StatusOK { return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go index 112186a9e6e..a8220642348 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go @@ -18,6 +18,7 @@ import ( "context" "errors" "fmt" + "log/slog" "net/http" "regexp" "strconv" @@ -28,6 +29,7 @@ import ( "cloud.google.com/go/auth/credentials/internal/impersonate" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -104,6 +106,11 @@ type Options struct { // This is important for X509 credentials which should create a new client if the default was used // but should respect a client explicitly passed in by the user. IsDefaultClient bool + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. 
+ Logger *slog.Logger } // SubjectTokenProvider can be used to supply a subject token to exchange for a @@ -224,6 +231,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { return nil, err } opts.resolveTokenURL() + logger := internallog.New(opts.Logger) stp, err := newSubjectTokenProvider(opts) if err != nil { return nil, err @@ -238,6 +246,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { client: client, opts: opts, stp: stp, + logger: logger, } if opts.ServiceAccountImpersonationURL == "" { @@ -254,6 +263,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { Scopes: scopes, Tp: auth.NewCachedTokenProvider(tp, nil), TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds, + Logger: logger, }) if err != nil { return nil, err @@ -269,6 +279,7 @@ type subjectTokenProvider interface { // tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens. type tokenProvider struct { client *http.Client + logger *slog.Logger opts *Options stp subjectTokenProvider } @@ -310,6 +321,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { Authentication: clientAuth, Headers: header, ExtraOpts: options, + Logger: tp.logger, }) if err != nil { return nil, err @@ -330,12 +342,14 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { // newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a // subjectTokenProvider func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { + logger := internallog.New(o.Logger) reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType} if o.AwsSecurityCredentialsProvider != nil { return &awsSubjectProvider{ securityCredentialsProvider: o.AwsSecurityCredentialsProvider, TargetResource: o.Audience, reqOpts: reqOpts, + logger: logger, }, nil } else if o.SubjectTokenProvider != nil { return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil @@ -352,6 +366,7 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { CredVerificationURL: o.CredentialSource.URL, TargetResource: o.Audience, Client: o.Client, + logger: logger, } if o.CredentialSource.IMDSv2SessionTokenURL != "" { awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL @@ -362,7 +377,13 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { } else if o.CredentialSource.File != "" { return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil } else if o.CredentialSource.URL != "" { - return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil + return &urlSubjectProvider{ + URL: o.CredentialSource.URL, + Headers: o.CredentialSource.Headers, + Format: o.CredentialSource.Format, + Client: o.Client, + Logger: logger, + }, nil } else if o.CredentialSource.Executable != nil { ec := o.CredentialSource.Executable if ec.Command == "" { diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go index 0a020599e07..754ecf4fef9 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -19,10 +19,12 @@ import ( "encoding/json" "errors" "fmt" + 
"log/slog" "net/http" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -38,6 +40,7 @@ type urlSubjectProvider struct { Headers map[string]string Format *credsfile.Format Client *http.Client + Logger *slog.Logger } func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) { @@ -49,10 +52,12 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) for key, val := range sp.Headers { req.Header.Add(key, val) } + sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil)) resp, body, err := internal.DoRequest(sp.Client, req) if err != nil { return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err) } + sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return "", fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go index 0d788547987..ae39206e5f3 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go @@ -17,12 +17,14 @@ package externalaccountuser import ( "context" "errors" + "log/slog" "net/http" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials/internal/stsexchange" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) // Options stores the configuration for fetching tokens with external authorized @@ -51,6 +53,8 @@ type Options struct { // Client for token request. Client *http.Client + // Logger for logging. 
+ Logger *slog.Logger } func (c *Options) validate() bool { @@ -90,6 +94,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) { RefreshToken: opts.RefreshToken, Authentication: clientAuth, Headers: headers, + Logger: internallog.New(tp.o.Logger), }) if err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go index 720045d3b07..c2d320fdf4c 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go @@ -16,12 +16,13 @@ package gdch import ( "context" - "crypto/rsa" + "crypto" "crypto/tls" "crypto/x509" "encoding/json" "errors" "fmt" + "log/slog" "net/http" "net/url" "os" @@ -32,6 +33,7 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/credsfile" "cloud.google.com/go/auth/internal/jwt" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -51,6 +53,7 @@ var ( type Options struct { STSAudience string Client *http.Client + Logger *slog.Logger } // NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a @@ -62,7 +65,7 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok if o.STSAudience == "" { return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows") } - pk, err := internal.ParseKey([]byte(f.PrivateKey)) + signer, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, err } @@ -75,10 +78,11 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name), tokenURL: f.TokenURL, aud: o.STSAudience, - pk: pk, + signer: signer, pkID: f.PrivateKeyID, certPool: certPool, client: o.Client, + logger: internallog.New(o.Logger), } return tp, nil } @@ -97,11 +101,12 @@ type gdchProvider struct { serviceIdentity string tokenURL string aud string - pk *rsa.PrivateKey + signer crypto.Signer pkID string certPool *x509.CertPool client *http.Client + logger *slog.Logger } func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { @@ -120,7 +125,7 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(g.pkID), } - payload, err := jwt.EncodeJWS(&h, &claims, g.pk) + payload, err := jwt.EncodeJWS(&h, &claims, g.signer) if err != nil { return nil, err } @@ -136,10 +141,12 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) resp, body, err := internal.DoRequest(g.client, req) if err != nil { return nil, fmt.Errorf("credentials: cannot fetch token: %w", err) } + g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, &auth.Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go index ed53afa519e..b3a99261fa9 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go @@ -20,11 
+20,13 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "net/http" "time" "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -74,6 +76,11 @@ type Options struct { // Client configures the underlying client used to make network requests // when fetching tokens. Required. Client *http.Client + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *Options) validate() error { @@ -88,6 +95,7 @@ func (o *Options) validate() error { // Token performs the exchange to get a temporary service account token to allow access to GCP. func (o *Options) Token(ctx context.Context) (*auth.Token, error) { + logger := internallog.New(o.Logger) lifetime := defaultTokenLifetime if o.TokenLifetimeSeconds != 0 { lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds) @@ -109,10 +117,12 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) { if err := setAuthHeader(ctx, o.Tp, req); err != nil { return nil, err } + logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b)) resp, body, err := internal.DoRequest(o.Client, req) if err != nil { return nil, fmt.Errorf("credentials: unable to generate access token: %w", err) } + logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go index 768a9dafc13..e1d2b15034d 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go @@ -19,6 +19,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "log/slog" "net/http" "net/url" "strconv" @@ -26,6 +27,7 @@ import ( "cloud.google.com/go/auth" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) const ( @@ -40,6 +42,7 @@ const ( // Options stores the configuration for making an sts exchange request. 
type Options struct { Client *http.Client + Logger *slog.Logger Endpoint string Request *TokenRequest Authentication ClientAuthentication @@ -80,6 +83,7 @@ func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) { func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) { opts.Authentication.InjectAuthentication(data, opts.Headers) encodedData := data.Encode() + logger := internallog.New(opts.Logger) req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData)) if err != nil { @@ -93,10 +97,12 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo } req.Header.Set("Content-Length", strconv.Itoa(len(encodedData))) + logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData))) resp, body, err := internal.DoRequest(opts.Client, req) if err != nil { return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err) } + logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body)) if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices { return nil, fmt.Errorf("credentials: status code %d: %s", c, body) } diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index 6ae29de6c27..8d335ccecc9 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -16,9 +16,10 @@ package credentials import ( "context" - "crypto/rsa" + "crypto" "errors" "fmt" + "log/slog" "strings" "time" @@ -39,7 +40,7 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions if len(opts.scopes()) == 0 && opts.Audience == "" { return nil, errors.New("credentials: both scopes and audience are empty") } - pk, err := internal.ParseKey([]byte(f.PrivateKey)) + signer, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) } @@ -47,8 +48,9 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions email: f.ClientEmail, audience: opts.Audience, scopes: opts.scopes(), - pk: pk, + signer: signer, pkID: f.PrivateKeyID, + logger: opts.logger(), }, nil } @@ -56,8 +58,9 @@ type selfSignedTokenProvider struct { email string audience string scopes []string - pk *rsa.PrivateKey + signer crypto.Signer pkID string + logger *slog.Logger } func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { @@ -77,9 +80,10 @@ func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) { Type: jwt.HeaderType, KeyID: string(tp.pkID), } - msg, err := jwt.EncodeJWS(h, c, tp.pk) + tok, err := jwt.EncodeJWS(h, c, tp.signer) if err != nil { return nil, fmt.Errorf("credentials: could not encode JWT: %w", err) } - return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil + tp.logger.Debug("created self-signed JWT", "token", tok) + return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index 8696df1487f..d781c3e49a9 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -66,12 +66,12 @@ func 
isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool if tok == nil { return false } - if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { - return true - } if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { return false } + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } if tok.MetadataString("auth.google.serviceAccount") != "default" { return false } diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 38212ed0f82..95f259037f2 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -21,6 +21,7 @@ import ( "crypto/tls" "errors" "fmt" + "log/slog" "net/http" "os" "sync" @@ -29,7 +30,7 @@ import ( "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" - "go.opencensus.io/plugin/ocgrpc" + "github.com/googleapis/gax-go/v2/internallog" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" @@ -117,6 +118,11 @@ type Options struct { // APIKey specifies an API key to be used as the basis for authentication. // If set DetectOpts are ignored. APIKey string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. @@ -132,6 +138,10 @@ func (o *Options) client() *http.Client { return nil } +func (o *Options) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func (o *Options) validate() error { if o == nil { return errors.New("grpctransport: opts required to be non-nil") @@ -173,6 +183,9 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions { do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) do.TokenURL = credentials.GoogleMTLSTokenURL } + if do.Logger == nil { + do.Logger = o.logger() + } return do } @@ -241,6 +254,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er ClientCertProvider: opts.ClientCertProvider, Client: opts.client(), UniverseDomain: opts.UniverseDomain, + Logger: opts.logger(), } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate @@ -318,7 +332,6 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. - grpcOpts = addOCStatsHandler(grpcOpts, opts) grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) 
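The hunks above thread an optional `*slog.Logger` from the public `Options` down through credential detection and into every token fetch. A minimal consumer sketch (not part of this patch; it assumes only the `Logger` fields added above) of how the new opt-in debug logs would be enabled:

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"cloud.google.com/go/auth/credentials"
)

func main() {
	// Debug-level logger; with no Logger set, the new fields leave
	// logging disabled unless GOOGLE_SDK_GO_LOGGING_LEVEL is exported.
	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))

	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		Logger: logger, // field added in this patch
	})
	if err != nil {
		panic(err)
	}
	// Token fetches now emit paired "... token request" /
	// "... token response" debug records through the configured logger.
	if _, err := creds.Token(context.Background()); err != nil {
		panic(err)
	}
}
```
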
@@ -417,13 +430,6 @@ func (c *grpcCredentialsProvider) RequireTransportSecurity() bool { return c.secure } -func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { - if opts.DisableTelemetry { - return dialOpts - } - return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) -} - func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { if opts.DisableTelemetry { return dialOpts diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index cbe5a7a40a7..5758e85b5db 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -20,12 +20,14 @@ import ( "crypto/tls" "errors" "fmt" + "log/slog" "net/http" "cloud.google.com/go/auth" detect "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" + "github.com/googleapis/gax-go/v2/internallog" ) // ClientCertProvider is a function that returns a TLS client certificate to be @@ -69,6 +71,11 @@ type Options struct { // configured for the client, which will be compared to the universe domain // that is separately configured for the credentials. UniverseDomain string + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger // InternalOptions are NOT meant to be set directly by consumers of this // package, they should only be set by generated client code. @@ -101,6 +108,10 @@ func (o *Options) client() *http.Client { return nil } +func (o *Options) logger() *slog.Logger { + return internallog.New(o.Logger) +} + func (o *Options) resolveDetectOptions() *detect.DetectOptions { io := o.InternalOptions // soft-clone these so we are not updating a ref the user holds and may reuse @@ -125,6 +136,9 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions { do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig) do.TokenURL = detect.GoogleMTLSTokenURL } + if do.Logger == nil { + do.Logger = o.logger() + } return do } @@ -197,6 +211,7 @@ func NewClient(opts *Options) (*http.Client, error) { ClientCertProvider: opts.ClientCertProvider, Client: opts.client(), UniverseDomain: opts.UniverseDomain, + Logger: opts.logger(), } if io := opts.InternalOptions; io != nil { tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go deleted file mode 100644 index 467c477c04d..00000000000 --- a/vendor/cloud.google.com/go/auth/httptransport/trace.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package httptransport - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "net/http" - "strconv" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const ( - httpHeaderMaxSize = 200 - cloudTraceHeader = `X-Cloud-Trace-Context` -) - -// asserts the httpFormat fulfills this foreign interface -var _ propagation.HTTPFormat = (*httpFormat)(nil) - -// httpFormat implements propagation.httpFormat to propagate -// traces in HTTP headers for Google Cloud Platform and Cloud Trace. -type httpFormat struct{} - -// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests. -func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(cloudTraceHeader) - // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. - // Return if the header is empty or missing, or if the header is unreasonably - // large, to avoid making unnecessary copies of a large string. - if h == "" || len(h) > httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. - slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. - if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 32) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Cloud Trace header. -func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(cloudTraceHeader, header) -} diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 1d139b9dc49..ee215b6dc6c 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -27,7 +27,6 @@ import ( "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" - "go.opencensus.io/plugin/ochttp" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" ) @@ -43,10 +42,7 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers: headers, } var trans http.RoundTripper = ht - // Give OpenTelemetry precedence over OpenCensus in case user configuration - // causes both to write the same header (`X-Cloud-Trace-Context`). trans = addOpenTelemetryTransport(trans, opts) - trans = addOCTransport(trans, opts) switch { case opts.DisableAuthentication: // Do nothing. 
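With the OpenCensus round-tripper deleted, only the OpenTelemetry wrapper seen above (`addOpenTelemetryTransport`) instruments outgoing requests. For orientation, the surviving path reduces to the plain `otelhttp` transport wrapper; a standalone sketch (illustrative, not code from this patch):

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// newTracedClient mirrors what addOpenTelemetryTransport keeps doing
// once the OpenCensus layer is removed: wrap the base transport so
// spans and metrics are recorded for each request.
func newTracedClient(base http.RoundTripper) *http.Client {
	if base == nil {
		base = http.DefaultTransport
	}
	return &http.Client{Transport: otelhttp.NewTransport(base)}
}
```
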
@@ -179,16 +175,6 @@ func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.Roun return otelhttp.NewTransport(trans) } -func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { - if opts.DisableTelemetry { - return trans - } - return &ochttp.Transport{ - Base: trans, - Propagation: &httpFormat{}, - } -} - type authTransport struct { creds *auth.Credentials base http.RoundTripper diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go index 651bd61fbbc..05c7e8bdd49 100644 --- a/vendor/cloud.google.com/go/auth/internal/compute/compute.go +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -31,8 +31,7 @@ var ( // This is a copy of the gRPC internal googlecloud.OnGCE() func at: // https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go // The functionality is similar to the metadata.OnGCE() func at: -// https://github.com/xmenxk/google-cloud-go/blob/main/compute/metadata/metadata.go -// +// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go // The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. // In particular, OnComputeEngine() will return false on Serverless. func OnComputeEngine() bool { diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index d8c16119180..6f4ef43bba3 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -16,7 +16,7 @@ package internal import ( "context" - "crypto/rsa" + "crypto" "crypto/x509" "encoding/json" "encoding/pem" @@ -72,25 +72,27 @@ func DefaultClient() *http.Client { } // ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a +// to an crypto.Signer. It detects whether the private key is in a // PEM container or not. If so, it extracts the the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. -func ParseKey(key []byte) (*rsa.PrivateKey, error) { +func ParseKey(key []byte) (crypto.Signer, error) { block, _ := pem.Decode(key) if block != nil { key = block.Bytes } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) + var parsedKey crypto.PrivateKey + var err error + parsedKey, err = x509.ParsePKCS8PrivateKey(key) if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err) } } - parsed, ok := parsedKey.(*rsa.PrivateKey) + parsed, ok := parsedKey.(crypto.Signer) if !ok { - return nil, errors.New("private key is invalid") + return nil, errors.New("private key is not a signer") } return parsed, nil } @@ -179,6 +181,7 @@ func (p StaticProperty) GetProperty(context.Context) (string, error) { // ComputeUniverseDomainProvider fetches the credentials universe domain from // the google cloud metadata service. type ComputeUniverseDomainProvider struct { + MetadataClient *metadata.Client universeDomainOnce sync.Once universeDomain string universeDomainErr error @@ -188,7 +191,7 @@ type ComputeUniverseDomainProvider struct { // metadata service. 
func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) { c.universeDomainOnce.Do(func() { - c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx) + c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient) }) if c.universeDomainErr != nil { return "", c.universeDomainErr @@ -197,14 +200,14 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string } // httpGetMetadataUniverseDomain is a package var for unit test substitution. -var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { +var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) { ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() - return metadata.GetWithContext(ctx, "universe/universe-domain") + return client.GetWithContext(ctx, "universe/universe-domain") } -func getMetadataUniverseDomain(ctx context.Context) (string, error) { - universeDomain, err := httpGetMetadataUniverseDomain(ctx) +func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) { + universeDomain, err := httpGetMetadataUniverseDomain(ctx, client) if err == nil { return universeDomain, nil } diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go index dc28b3c3bb5..9bd55f510cc 100644 --- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go +++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go @@ -111,7 +111,7 @@ func (c *Claims) encode() (string, error) { } // EncodeJWS encodes the data using the provided key as a JSON web signature. -func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { +func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) { head, err := header.encode() if err != nil { return "", err @@ -123,7 +123,7 @@ func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) { ss := fmt.Sprintf("%s.%s", head, claims) h := sha256.New() h.Write([]byte(ss)) - sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256) if err != nil { return "", err } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index f606888f120..2f922f7dfef 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -20,6 +20,7 @@ import ( "crypto/x509" "errors" "log" + "log/slog" "net" "net/http" "net/url" @@ -51,22 +52,19 @@ const ( mtlsMDSKey = "/run/google-mds-mtls/client.key" ) -var ( - errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") -) - // Options is a struct that is duplicated information from the individual // transport packages in order to avoid cyclic deps. It correlates 1:1 with // fields on httptransport.Options and grpctransport.Options. 
type Options struct { Endpoint string - DefaultMTLSEndpoint string DefaultEndpointTemplate string + DefaultMTLSEndpoint string ClientCertProvider cert.Provider Client *http.Client UniverseDomain string EnableDirectPath bool EnableDirectPathXds bool + Logger *slog.Logger } // getUniverseDomain returns the default service domain for a given Cloud @@ -94,6 +92,16 @@ func (o *Options) defaultEndpoint() string { return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1) } +// defaultMTLSEndpoint returns the DefaultMTLSEndpointTemplate merged with the +// universe domain if the DefaultMTLSEndpointTemplate is set, otherwise returns an +// empty string. +func (o *Options) defaultMTLSEndpoint() string { + if o.DefaultMTLSEndpoint == "" { + return "" + } + return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1) +} + // mergedEndpoint merges a user-provided Endpoint of format host[:port] with the // default endpoint. func (o *Options) mergedEndpoint() (string, error) { @@ -256,12 +264,9 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { if !shouldUseS2A(clientCertSource, opts) { return &defaultTransportConfig, nil } - if !opts.isUniverseDomainGDU() { - return nil, errUniverseNotSupportedMTLS - } - s2aAddress := GetS2AAddress() - mtlsS2AAddress := GetMTLSS2AAddress() + s2aAddress := GetS2AAddress(opts.Logger) + mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger) if s2aAddress == "" && mtlsS2AAddress == "" { return &defaultTransportConfig, nil } @@ -270,7 +275,7 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { endpoint: endpoint, s2aAddress: s2aAddress, mtlsS2AAddress: mtlsS2AAddress, - s2aMTLSEndpoint: opts.DefaultMTLSEndpoint, + s2aMTLSEndpoint: opts.defaultMTLSEndpoint(), }, nil } @@ -316,24 +321,23 @@ type transportConfig struct { // getEndpoint returns the endpoint for the service, taking into account the // user-provided endpoint override "settings.Endpoint". // -// If no endpoint override is specified, we will either return the default endpoint or -// the default mTLS endpoint if a client certificate is available. +// If no endpoint override is specified, we will either return the default +// endpoint or the default mTLS endpoint if a client certificate is available. // -// You can override the default endpoint choice (mtls vs. regular) by setting the -// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// You can override the default endpoint choice (mTLS vs. regular) by setting +// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable. // // If the endpoint override is an address (host:port) rather than full base // URL (ex. https://...), then the user-provided address will be merged into // the default endpoint. For example, WithEndpoint("myhost:8000") and -// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz" +// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return +// "https://myhost:8080/bar/baz". Note that this does not apply to the mTLS +// endpoint. 
func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) { if opts.Endpoint == "" { mtlsMode := getMTLSMode() if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { - if !opts.isUniverseDomainGDU() { - return "", errUniverseNotSupportedMTLS - } - return opts.DefaultMTLSEndpoint, nil + return opts.defaultMTLSEndpoint(), nil } return opts.defaultEndpoint(), nil } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 37894bfcd01..a6330995636 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -19,6 +19,7 @@ import ( "encoding/json" "fmt" "log" + "log/slog" "os" "strconv" "sync" @@ -39,8 +40,8 @@ var ( // GetS2AAddress returns the S2A address to be reached via plaintext connection. // Returns empty string if not set or invalid. -func GetS2AAddress() string { - getMetadataMTLSAutoConfig() +func GetS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) if !mtlsConfiguration.valid() { return "" } @@ -49,8 +50,8 @@ func GetS2AAddress() string { // GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. // Returns empty string if not set or invalid. -func GetMTLSS2AAddress() string { - getMetadataMTLSAutoConfig() +func GetMTLSS2AAddress(logger *slog.Logger) string { + getMetadataMTLSAutoConfig(logger) if !mtlsConfiguration.valid() { return "" } @@ -74,22 +75,25 @@ type s2aAddresses struct { MTLSAddress string `json:"mtls_address"` } -func getMetadataMTLSAutoConfig() { +func getMetadataMTLSAutoConfig(logger *slog.Logger) { var err error mtlsOnce.Do(func() { - mtlsConfiguration, err = queryConfig() + mtlsConfiguration, err = queryConfig(logger) if err != nil { log.Printf("Getting MTLS config failed: %v", err) } }) } -var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.GetWithContext(context.Background(), configEndpointSuffix) +var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) { + metadataClient := metadata.NewWithOptions(&metadata.Options{ + Logger: logger, + }) + return metadataClient.GetWithContext(context.Background(), configEndpointSuffix) } -func queryConfig() (*mtlsConfig, error) { - resp, err := httpGetMetadataMTLSConfig() +func queryConfig(logger *slog.Logger) (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig(logger) if err != nil { return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index cc586ec5b1a..992ac40df0b 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -49,6 +49,7 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt // These fields are are pointer types that we just want to use exactly // as the user set, copy the ref Client: oldDo.Client, + Logger: oldDo.Logger, AuthHandlerOptions: oldDo.AuthHandlerOptions, } diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index 3df9f72cad8..a1ef2923799 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,12 @@ # Changelog +## 
[0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21) + + +### Bug Fixes + +* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161) + ## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go index e266b0c9b15..9cc33e5ee64 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -100,8 +100,13 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { Expiry: tok.Expiry, } // Preserve token metadata. - metadata := tok.Metadata - if metadata != nil { + m := tok.Metadata + if m != nil { + // Copy map to avoid concurrent map writes error (#11161). + metadata := make(map[string]interface{}, len(m)+2) + for k, v := range m { + metadata[k] = v + } // Append compute token metadata in converted form. if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" { metadata[oauth2TokenSourceKey] = val diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index 97a57f4694b..07804dc162d 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "fmt" + "log/slog" "mime" "net/http" "net/url" @@ -28,6 +29,7 @@ import ( "time" "cloud.google.com/go/auth/internal" + "github.com/googleapis/gax-go/v2/internallog" ) // AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for @@ -69,6 +71,11 @@ type Options3LO struct { // AuthHandlerOpts provides a set of options for doing a // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional. AuthHandlerOpts *AuthorizationHandlerOptions + // Logger is used for debug logging. If provided, logging will be enabled + // at the loggers configured level. By default logging is disabled unless + // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default + // logger will be used. Optional. + Logger *slog.Logger } func (o *Options3LO) validate() error { @@ -96,6 +103,10 @@ func (o *Options3LO) validate() error { return nil } +func (o *Options3LO) logger() *slog.Logger { + return internallog.New(o.Logger) +} + // PKCEOptions holds parameters to support PKCE. type PKCEOptions struct { // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier. 
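The `oauth2adapt` fix above stops mutating the token metadata map in place and writes into a fresh copy instead, so concurrent `Token` callers cannot race on the shared map. The general shape of that defensive copy, as a sketch (the helper name is illustrative):

```go
// cloneWithRoom returns a copy of m with capacity for extra additional
// entries, so callers can add converted metadata without touching the
// original map (the concurrent-write race fixed in oauth2adapt above).
func cloneWithRoom(m map[string]interface{}, extra int) map[string]interface{} {
	out := make(map[string]interface{}, len(m)+extra)
	for k, v := range m {
		out[k] = v
	}
	return out
}
```
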
@@ -293,12 +304,15 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin if o.AuthStyle == StyleInHeader { req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret)) } + logger := o.logger() + logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode()))) // Make request resp, body, err := internal.DoRequest(o.client(), req) if err != nil { return nil, refreshToken, err } + logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body)) failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299 tokError := &Error{ Response: resp, diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index da7db19b1c6..bcfb5d81659 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13) + + +### Features + +* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f)) + ## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) diff --git a/vendor/cloud.google.com/go/compute/metadata/log.go b/vendor/cloud.google.com/go/compute/metadata/log.go new file mode 100644 index 00000000000..8ec673b8823 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/log.go @@ -0,0 +1,149 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "strings" +) + +// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog +// to avoid the dependency. The compute/metadata module is used by too many +// non-client library modules that can't justify the dependency. + +// The handler returned if logging is not enabled. +type noOpHandler struct{} + +func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool { + return false +} + +func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error { + return nil +} + +func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler { + return h +} + +func (h noOpHandler) WithGroup(_ string) slog.Handler { + return h +} + +// httpRequest returns a lazily evaluated [slog.LogValuer] for a +// [http.Request] and the associated body. 
+func httpRequest(req *http.Request, body []byte) slog.LogValuer { + return &request{ + req: req, + payload: body, + } +} + +type request struct { + req *http.Request + payload []byte +} + +func (r *request) LogValue() slog.Value { + if r == nil || r.req == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method)) + groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String())) + + var headerAttr []slog.Attr + for k, val := range r.req.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +// httpResponse returns a lazily evaluated [slog.LogValuer] for a +// [http.Response] and the associated body. +func httpResponse(resp *http.Response, body []byte) slog.LogValuer { + return &response{ + resp: resp, + payload: body, + } +} + +type response struct { + resp *http.Response + payload []byte +} + +func (r *response) LogValue() slog.Value { + if r == nil { + return slog.Value{} + } + var groupValueAttrs []slog.Attr + groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode))) + + var headerAttr []slog.Attr + for k, val := range r.resp.Header { + headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ","))) + } + if len(headerAttr) > 0 { + groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr)) + } + + if len(r.payload) > 0 { + if attr, ok := processPayload(r.payload); ok { + groupValueAttrs = append(groupValueAttrs, attr) + } + } + return slog.GroupValue(groupValueAttrs...) +} + +func processPayload(payload []byte) (slog.Attr, bool) { + peekChar := payload[0] + if peekChar == '{' { + // JSON object + var m map[string]any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else if peekChar == '[' { + // JSON array + var m []any + if err := json.Unmarshal(payload, &m); err == nil { + return slog.Any("payload", m), true + } + } else { + // Everything else + buf := &bytes.Buffer{} + if err := json.Compact(buf, payload); err != nil { + // Write raw payload incase of error + buf.Write(payload) + } + return slog.String("payload", buf.String()), true + } + return slog.Attr{}, false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index c160b4786bb..4c18a383a43 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -24,6 +24,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "net" "net/http" "net/url" @@ -60,7 +61,10 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var defaultClient = &Client{hc: newDefaultHTTPClient()} +var defaultClient = &Client{ + hc: newDefaultHTTPClient(), + logger: slog.New(noOpHandler{}), +} func newDefaultHTTPClient() *http.Client { return &http.Client{ @@ -408,17 +412,42 @@ func strsContains(ss []string, s string) bool { // A Client provides metadata. type Client struct { - hc *http.Client + hc *http.Client + logger *slog.Logger +} + +// Options for configuring a [Client]. +type Options struct { + // Client is the HTTP client used to make requests. Optional. 
+ Client *http.Client + // Logger is used to log information about HTTP request and responses. + // If not provided, nothing will be logged. Optional. + Logger *slog.Logger } // NewClient returns a Client that can be used to fetch metadata. // Returns the client that uses the specified http.Client for HTTP requests. // If nil is specified, returns the default client. func NewClient(c *http.Client) *Client { - if c == nil { + return NewWithOptions(&Options{ + Client: c, + }) +} + +// NewWithOptions returns a Client that is configured with the provided Options. +func NewWithOptions(opts *Options) *Client { + if opts == nil { return defaultClient } - return &Client{hc: c} + client := opts.Client + if client == nil { + client = newDefaultHTTPClient() + } + logger := opts.Logger + if logger == nil { + logger = slog.New(noOpHandler{}) + } + return &Client{hc: client, logger: logger} } // getETag returns a value from the metadata service as well as the associated ETag. @@ -448,12 +477,21 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string req.Header.Set("User-Agent", userAgent) var res *http.Response var reqErr error + var body []byte retryer := newRetryer() for { + c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil)) res, reqErr = c.hc.Do(req) var code int if res != nil { code = res.StatusCode + body, err = io.ReadAll(res.Body) + if err != nil { + res.Body.Close() + return "", "", err + } + c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body)) + res.Body.Close() } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { if res != nil && res.Body != nil { @@ -469,18 +507,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string if reqErr != nil { return "", "", reqErr } - defer res.Body.Close() if res.StatusCode == http.StatusNotFound { return "", "", NotDefinedError(suffix) } - all, err := io.ReadAll(res.Body) - if err != nil { - return "", "", err - } if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} + return "", "", &Error{Code: res.StatusCode, Message: string(body)} } - return string(all), res.Header.Get("Etag"), nil + return string(body), res.Header.Get("Etag"), nil } // Get returns a value from the metadata service. diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b3c..f4e7dbf37b3 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... 
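For reference, a usage sketch of the compute/metadata options API introduced above (v0.6.0). It assumes only the surface shown in this diff (NewWithOptions, Options.Client, Options.Logger) plus the pre-existing GetWithContext accessor; "project/project-id" is a standard metadata suffix used here as an example.

    package main

    import (
    	"context"
    	"log/slog"
    	"os"

    	"cloud.google.com/go/compute/metadata"
    )

    func main() {
    	// Debug logging is opt-in: leaving Options.Logger nil falls back to
    	// the no-op handler above, so existing callers see no new output.
    	c := metadata.NewWithOptions(&metadata.Options{
    		Logger: slog.New(slog.NewTextHandler(os.Stderr,
    			&slog.HandlerOptions{Level: slog.LevelDebug})),
    	})

    	// Each request/response pair is now logged at debug level via the
    	// lazily evaluated httpRequest/httpResponse values from log.go.
    	id, err := c.GetWithContext(context.Background(), "project/project-id")
    	if err != nil {
    		// Not running on GCE, or the metadata server is unreachable.
    		return
    	}
    	_ = id
    }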
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851e5..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0a..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b12..daea9dd6d6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e57575496..fa854785d0f 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ---------- -Nothing yet. +1.8.0 2024-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d51..e4ac2a2fffd 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster.
+Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd8e..c349c326c71 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. 
https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. 
moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. 
-func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. 
If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e401..36c311694cd 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. 
+ if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". 
Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. 
- return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. 
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a07..d8de5ab76fd 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
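The per-platform Watcher implementations become unexported backends (kqueue here; inotify, readDirChangesW, and other elsewhere in this patch) that share Events and Errors channels supplied by the frontend. The backend interface itself is defined in fsnotify.go, outside this hunk; a plausible shape, inferred only from the methods every backend in this patch implements, would be:

	type backend interface {
		Add(name string) error
		AddWith(name string, opts ...addOpt) error
		Remove(name string) error
		WatchList() []string
		Close() error
		xSupports(op Op) bool
	}

It is constructed per platform by newBackend(ev, errs) or newBufferedBackend(sz, ev, errs), which is why the channels are now passed in as arguments rather than created by each backend.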
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
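+//
+// Only paths in byUser are reported by WatchList; Close removes all paths,
+// including the ones added internally for directory contents.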
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
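+	// Refuse requested ops this backend can't deliver; kqueue currently
+	// reports all xUnportable* ops as unsupported (see xSupports at the
+	// end of this file).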
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c015f..5eb5dbc66f2 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d613..c54a6308383 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc4999..0760efe9160 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run into your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send a Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent.
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 00000000000..b0eab10090d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 00000000000..928319fb09a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 00000000000..3186b0c3491 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 00000000000..f69fdb930f5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 00000000000..607e683bd73 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 00000000000..35c734be431 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 00000000000..e5b3b6f6943 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 00000000000..1dd455bc5a4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 00000000000..f1b2e73bd5b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 00000000000..52bf4ce53b5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 00000000000..547df1df84b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 00000000000..7daa45e19ee --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 00000000000..30976ce9739 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 00000000000..37dfeddc289 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 00000000000..a72c6495490 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae6539..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b8855..f65e8fe3edc 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78fe..a29fc7aab62 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index d975773d490..d970c7cf448 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -264,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + return errors.New("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 860bb304c34..8ce9d3cf3b7 100644 --- 
a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -122,6 +122,7 @@ func (p *Profile) preEncode() { } p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + p.docURLX = addString(strings, p.DocURL) p.stringTable = make([]string, len(strings)) for s, i := range strings { @@ -156,6 +157,7 @@ func (p *Profile) encode(b *buffer) { encodeInt64Opt(b, 12, p.Period) encodeInt64s(b, 13, p.commentX) encodeInt64(b, 14, p.defaultSampleTypeX) + encodeInt64Opt(b, 15, p.docURLX) } var profileDecoder = []decoder{ @@ -237,6 +239,8 @@ var profileDecoder = []decoder{ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, // int64 defaultSampleType = 14 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, + // string doc_link = 15; + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, } // postDecode takes the unexported fields populated by decode (with @@ -384,6 +388,7 @@ func (p *Profile) postDecode() error { p.commentX = nil p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.DocURL, err = getString(p.stringTable, &p.docURLX, err) p.stringTable = nil return err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index eee0132e740..ba4d746407c 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -476,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { var timeNanos, durationNanos, period int64 var comments []string seenComments := map[string]bool{} + var docURL string var defaultSampleType string for _, s := range srcs { if timeNanos == 0 || s.TimeNanos < timeNanos { @@ -494,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { if defaultSampleType == "" { defaultSampleType = s.DefaultSampleType } + if docURL == "" { + docURL = s.DocURL + } } p := &Profile{ @@ -509,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) { Comments: comments, DefaultSampleType: defaultSampleType, + DocURL: docURL, } copy(p.SampleType, srcs[0].SampleType) return p, nil diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 5551eb0bfa4..f47a243903e 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -39,6 +39,7 @@ type Profile struct { Location []*Location Function []*Function Comments []string + DocURL string DropFrames string KeepFrames string @@ -53,6 +54,7 @@ type Profile struct { encodeMu sync.Mutex commentX []int64 + docURLX int64 dropFramesX int64 keepFramesX int64 stringTable []string @@ -555,6 +557,9 @@ func (p *Profile) String() string { for _, c := range p.Comments { ss = append(ss, "Comment: "+c) } + if url := p.DocURL; url != "" { + ss = append(ss, fmt.Sprintf("Doc: %s", url)) + } if pt := p.PeriodType; pt != nil { ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) } @@ -844,7 +849,7 @@ func (p *Profile) HasFileLines() bool { // Unsymbolizable returns true if a mapping points to a binary for which // locations can't be symbolized in principle, at least now. Examples are -// "[vdso]", [vsyscall]" and some others, see the code. +// "[vdso]", "[vsyscall]" and some others, see the code. 
func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go index 9e9a711c29a..e6b33863bd4 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go @@ -19,12 +19,12 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram { bucketsPerSide := numBuckets / 2 spanLength := uint32(bucketsPerSide / numSpans) // Given all bucket deltas are 1, sum bucketsPerSide + 1. - observationCount := bucketsPerSide * (1 + bucketsPerSide) + observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide)) var histograms []*Histogram for i := 0; i < numHistograms; i++ { h := &Histogram{ - Count: uint64(i + observationCount), + Count: uint64(i) + observationCount, ZeroCount: uint64(i), ZeroThreshold: 1e-128, Sum: 18.4 * float64(i+1), diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go index ff756965f49..83e381539f1 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go @@ -177,61 +177,63 @@ func (p *NHCBParser) CreatedTimestamp() *int64 { } func (p *NHCBParser) Next() (Entry, error) { - if p.state == stateEmitting { - p.state = stateStart - if p.entry == EntrySeries { - isNHCB := p.handleClassicHistogramSeries(p.lset) - if isNHCB && !p.keepClassicHistograms { - // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. - return p.Next() + for { + if p.state == stateEmitting { + p.state = stateStart + if p.entry == EntrySeries { + isNHCB := p.handleClassicHistogramSeries(p.lset) + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + continue + } } + return p.entry, p.err } - return p.entry, p.err - } - p.entry, p.err = p.parser.Next() - if p.err != nil { - if errors.Is(p.err, io.EOF) && p.processNHCB() { - return EntryHistogram, nil - } - return EntryInvalid, p.err - } - switch p.entry { - case EntrySeries: - p.bytes, p.ts, p.value = p.parser.Series() - p.metricString = p.parser.Metric(&p.lset) - // Check the label set to see if we can continue or need to emit the NHCB. - var isNHCB bool - if p.compareLabels() { - // Labels differ. Check if we can emit the NHCB. - if p.processNHCB() { + p.entry, p.err = p.parser.Next() + if p.err != nil { + if errors.Is(p.err, io.EOF) && p.processNHCB() { return EntryHistogram, nil } - isNHCB = p.handleClassicHistogramSeries(p.lset) - } else { - // Labels are the same. Check if after an exponential histogram. - if p.lastHistogramExponential { - isNHCB = false - } else { + return EntryInvalid, p.err + } + switch p.entry { + case EntrySeries: + p.bytes, p.ts, p.value = p.parser.Series() + p.metricString = p.parser.Metric(&p.lset) + // Check the label set to see if we can continue or need to emit the NHCB. + var isNHCB bool + if p.compareLabels() { + // Labels differ. Check if we can emit the NHCB. 
+ if p.processNHCB() { + return EntryHistogram, nil + } isNHCB = p.handleClassicHistogramSeries(p.lset) + } else { + // Labels are the same. Check if after an exponential histogram. + if p.lastHistogramExponential { + isNHCB = false + } else { + isNHCB = p.handleClassicHistogramSeries(p.lset) + } + } + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + continue } + return p.entry, p.err + case EntryHistogram: + p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() + p.metricString = p.parser.Metric(&p.lset) + p.storeExponentialLabels() + case EntryType: + p.bName, p.typ = p.parser.Type() } - if isNHCB && !p.keepClassicHistograms { - // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. - return p.Next() + if p.processNHCB() { + return EntryHistogram, nil } return p.entry, p.err - case EntryHistogram: - p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() - p.metricString = p.parser.Metric(&p.lset) - p.storeExponentialLabels() - case EntryType: - p.bName, p.typ = p.parser.Type() - } - if p.processNHCB() { - return EntryHistogram, nil } - return p.entry, p.err } // Return true if labels have changed and we should emit the NHCB. diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 3f336188e9b..c8b05f8330c 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -436,6 +436,8 @@ func NewEngine(opts EngineOpts) *Engine { } // Close closes ng. +// Callers must ensure the engine is really no longer in use before calling this, to avoid +// failures like the one in https://github.com/prometheus/prometheus/issues/15232 func (ng *Engine) Close() error { if ng == nil { return nil @@ -1353,7 +1355,7 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate } groups := make([]groupedAggregation, groupCount) - var k int + var k int64 var ratio float64 var seriess map[uint64]Series switch aggExpr.Op { @@ -1361,9 +1363,9 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate if !convertibleToInt64(param) { ev.errorf("Scalar value %v overflows int64", param) } - k = int(param) - if k > len(inputMatrix) { - k = len(inputMatrix) + k = int64(param) + if k > int64(len(inputMatrix)) { + k = int64(len(inputMatrix)) } if k < 1 { return nil, warnings @@ -3173,7 +3175,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // seriesToResult maps inputMatrix indexes to groups indexes. // For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio. // For a range query, aggregates output in the seriess map. -func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { +func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int64, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) { op := e.Op var s Sample var annos annotations.Annotations @@ -3244,7 +3246,7 @@ seriesLoop: case s.H != nil: // Ignore histogram sample and add info annotation.
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("topk", e.PosRange)) - case len(group.heap) < k: + case int64(len(group.heap)) < k: heap.Push(&group.heap, &s) case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is bigger than the previous smallest element - overwrite that. @@ -3260,7 +3262,7 @@ seriesLoop: case s.H != nil: // Ignore histogram sample and add info annotation. annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("bottomk", e.PosRange)) - case len(group.heap) < k: + case int64(len(group.heap)) < k: heap.Push((*vectorByReverseValueHeap)(&group.heap), &s) case group.heap[0].F > s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)): // This new element is smaller than the previous biggest element - overwrite that. @@ -3271,13 +3273,13 @@ seriesLoop: } case parser.LIMITK: - if len(group.heap) < k { + if int64(len(group.heap)) < k { heap.Push(&group.heap, &s) } // LIMITK optimization: early break if we've added K elem to _every_ group, // especially useful for large timeseries where the user is exploring labels via e.g. // limitk(10, my_metric) - if !group.groupAggrComplete && len(group.heap) == k { + if !group.groupAggrComplete && int64(len(group.heap)) == k { group.groupAggrComplete = true groupsRemaining-- if groupsRemaining == 0 { diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index da1821fd18a..5f31a3db180 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -355,7 +355,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // https://en.wikipedia.org/wiki/Exponential_smoothing . func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] - + metricName := samples.Metric.Get(labels.MetricName) // The smoothing factor argument. sf := vals[1].(Vector)[0].F @@ -374,6 +374,10 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions // Can't do the smoothing operation with less than two points. if l < 2 { + // Annotate mix of float and histogram. + if l == 1 && len(samples.Histograms) > 0 { + return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return enh.Out, nil } @@ -394,7 +398,9 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions s0, s1 = s1, x+y } - + if len(samples.Histograms) > 0 { + return append(enh.Out, Sample{F: s1}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return append(enh.Out, Sample{F: s1}), nil } @@ -1110,10 +1116,15 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f // === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) === func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] + metricName := samples.Metric.Get(labels.MetricName) - // No sense in trying to compute a derivative without at least two points. + // No sense in trying to compute a derivative without at least two float points. // Drop this Vector element. if len(samples.Floats) < 2 { + // Annotate mix of float and histogram. 
+ if len(samples.Floats) == 1 && len(samples.Histograms) > 0 { + return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return enh.Out, nil } @@ -1121,6 +1132,9 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // to avoid floating point accuracy issues, see // https://github.com/prometheus/prometheus/issues/2674 slope, _ := linearRegression(samples.Floats, samples.Floats[0].T) + if len(samples.Histograms) > 0 { + return append(enh.Out, Sample{F: slope}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return append(enh.Out, Sample{F: slope}), nil } @@ -1128,13 +1142,22 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] duration := vals[1].(Vector)[0].F - // No sense in trying to predict anything without at least two points. + metricName := samples.Metric.Get(labels.MetricName) + + // No sense in trying to predict anything without at least two float points. // Drop this Vector element. if len(samples.Floats) < 2 { + // Annotate mix of float and histogram. + if len(samples.Floats) == 1 && len(samples.Histograms) > 0 { + return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return enh.Out, nil } - slope, intercept := linearRegression(samples.Floats, enh.Ts) + slope, intercept := linearRegression(samples.Floats, enh.Ts) + if len(samples.Histograms) > 0 { + return append(enh.Out, Sample{F: slope*duration + intercept}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return append(enh.Out, Sample{F: slope*duration + intercept}), nil } diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test index a00ed8a3ea6..6d2ade3abc4 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test @@ -234,11 +234,25 @@ clear load 5m http_requests{path="/foo"} 0 50 100 150 200 http_requests{path="/bar"} 200 150 100 50 0 + http_requests_gauge{path="/foo"} {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}}+{{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}}x5 + http_requests_counter{path="/foo"} {{schema:0 sum:0 count:0 buckets:[0 0 0]}}+{{schema:0 sum:1 count:2 buckets:[1 1 1]}}x5 + http_requests_mix{path="/foo"} 0 50 100 {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}} {{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}} eval instant at 20m delta(http_requests[20m]) {path="/foo"} 200 {path="/bar"} -200 +eval instant at 20m delta(http_requests_gauge[20m]) + {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} + +# delta emits warn annotation for non-gauge histogram types. +eval_warn instant at 20m delta(http_requests_counter[20m]) + {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} + +# delta emits warn annotation for mix of histogram and floats. +eval_warn instant at 20m delta(http_requests_mix[20m]) + #empty + clear # Tests for idelta(). 
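The functions.go hunks above give deriv, predict_linear, and double_exponential_smoothing a single shared rule for ranges that mix floats and native histograms: histogram samples are dropped, and whenever at least one float sample is also present the caller is told why via an info annotation, while histogram-only ranges simply produce no output and no annotation. A minimal sketch of that rule as a standalone helper follows; mixedRangeInfo is a hypothetical name and not part of this patch, but the annotations and posrange calls are the same ones the patch itself uses:

    package sketch

    import (
    	"github.com/prometheus/prometheus/promql/parser/posrange"
    	"github.com/prometheus/prometheus/util/annotations"
    )

    // mixedRangeInfo reports whether a range with nFloats float samples can
    // produce a result (all three functions need at least two floats), and
    // carries the info annotation when histograms were dropped from a mixed
    // float/histogram range.
    func mixedRangeInfo(metricName string, nFloats, nHistograms int, pos posrange.PositionRange) (bool, annotations.Annotations) {
    	var annos annotations.Annotations
    	if nFloats >= 1 && nHistograms > 0 {
    		// Mixed range: the histograms are silently skipped by the
    		// computation, so surface that to the user.
    		annos = annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, pos))
    	}
    	// Histogram-only ranges (nFloats == 0) drop out with no annotation.
    	return nFloats >= 2, annos
    }

The eval_info and plain eval cases in the surrounding test hunks pin down exactly these branches: a value plus an info annotation for a mixed range, and silent empty output for a histogram-only range.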
@@ -258,7 +272,8 @@ load 5m http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10 testcounter_reset_middle_mix 0+10x4 0+10x5 {{schema:0 sum:1 count:1}} {{schema:1 sum:2 count:2}} http_requests_mix{job="app-server", instance="1", group="canary"} 0+80x10 {{schema:0 sum:1 count:1}} - http_requests_histogram{job="app-server", instance="1", group="canary"} {{schema:0 sum:1 count:2}}x10 + http_requests_histogram{job="app-server", instance="1", group="canary"} {{schema:0 sum:1 count:2}}x10 + http_requests_inf{job="app-server", instance="1", group="canary"} -Inf 0+80x10 Inf # deriv should return the same as rate in simple cases. eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m]) @@ -271,15 +286,20 @@ eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job= eval instant at 50m deriv(testcounter_reset_middle_total[100m]) {} 0.010606060606060607 -# deriv should ignore histograms. -eval instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) +# deriv should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. +eval_info instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m]) {group="canary", instance="1", job="app-server"} 0.26666666666666666 -eval instant at 100m deriv(testcounter_reset_middle_mix[110m]) +eval_info instant at 100m deriv(testcounter_reset_middle_mix[110m]) {} 0.010606060606060607 +# deriv should silently ignore ranges consisting only of histograms. eval instant at 50m deriv(http_requests_histogram[60m]) - #empty + #empty + +# deriv should return NaN in case of +Inf or -Inf found. +eval instant at 100m deriv(http_requests_inf[100m]) + {job="app-server", instance="1", group="canary"} NaN # predict_linear should return correct result. # X/s = [ 0, 300, 600, 900,1200,1500,1800,2100,2400,2700,3000] @@ -316,6 +336,21 @@ eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3 eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600) {} 89.54545454545455 +# predict_linear should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. +eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000) + {} 70 + +eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m) + {} 70 + +# predict_linear should silently ignore ranges consisting only of histograms. +eval instant at 60m predict_linear(http_requests_histogram[60m], 50m) + #empty + +# predict_linear should return NaN in case of +Inf or -Inf found. +eval instant at 100m predict_linear(http_requests_inf[100m], 6000) + {job="app-server", instance="1", group="canary"} NaN + # With http_requests_total, there is a sample value exactly at the end of # the range, and it has exactly the predicted value, so predict_linear # can be emulated with deriv. 
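The NaN expectations just above follow from plain IEEE 754 arithmetic rather than an explicit check: linearRegression accumulates ordinary sums over the float samples, and once a +Inf or -Inf sample enters those sums, the slope computation mixes infinities and yields NaN. A tiny standalone illustration, assuming nothing beyond the Go standard library:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	inf := math.Inf(1)
    	fmt.Println(inf - inf)                   // NaN: the shape of the slope's covariance term with Inf sums
    	fmt.Println(inf * 0)                     // NaN
    	fmt.Println(math.IsNaN((inf - inf) / 2)) // true: NaN survives all further arithmetic
    }

This is why deriv and predict_linear report NaN for http_requests_inf instead of dropping the series or raising an error.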
@@ -719,6 +754,11 @@ load 10s http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 + http_requests_mix{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 {{schema:0 count:1 sum:2}}x1000 + http_requests_mix{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 {{schema:0 count:1 sum:2}}x1000 + http_requests_mix{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 {{schema:0 count:1 sum:2}}x1000 + http_requests_mix{job="api-server", instance="1", group="canary"} 0+40x2000 {{schema:0 count:1 sum:2}}x1000 + http_requests_histogram{job="api-server", instance="1", group="canary"} {{schema:0 count:1 sum:2}}x1000 eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 8000 @@ -726,6 +766,17 @@ eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="canary"} 24000 {job="api-server", instance="1", group="canary"} 32000 +# double_exponential_smoothing should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation. +eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1) + {job="api-server", instance="0", group="production"} 30100 + {job="api-server", instance="1", group="production"} 30200 + {job="api-server", instance="0", group="canary"} 80300 + {job="api-server", instance="1", group="canary"} 80000 + +# double_exponential_smoothing should silently ignore ranges consisting only of histograms. +eval instant at 10000s double_exponential_smoothing(http_requests_histogram[1m], 0.01, 0.1) + #empty + # negative trends clear load 10s diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go index 8ad8958f8dd..0965dc27636 100644 --- a/vendor/github.com/prometheus/prometheus/rules/group.go +++ b/vendor/github.com/prometheus/prometheus/rules/group.go @@ -23,6 +23,9 @@ import ( "sync" "time" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" "go.uber.org/atomic" "github.com/prometheus/prometheus/promql/parser" @@ -30,9 +33,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/common/promslog" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -523,154 +523,152 @@ func (g *Group) CopyState(from *Group) { // Rules can be evaluated concurrently if the `concurrent-rule-eval` feature flag is enabled. func (g *Group) Eval(ctx context.Context, ts time.Time) { var ( - samplesTotal atomic.Float64 - wg sync.WaitGroup - + samplesTotal atomic.Float64 ruleQueryOffset = g.QueryOffset() ) - - for i, rule := range g.rules { - select { - case <-g.done: - // There's a chance that the group is asked to return early. In that case, we should - // wait for any in-flight rules to finish evaluating before returning so that we can preserve the same semantics. - // At the time of writing, the main reason for this was to make sure we don't clear seriesInPreviousEval before we're done using it. 
- wg.Wait() - return - default: + eval := func(i int, rule Rule, cleanup func()) { + if cleanup != nil { + defer cleanup() } - eval := func(i int, rule Rule, cleanup func()) { - if cleanup != nil { - defer cleanup() - } + logger := g.logger.With("name", rule.Name(), "index", i) + ctx, sp := otel.Tracer("").Start(ctx, "rule") + sp.SetAttributes(attribute.String("name", rule.Name())) + defer func(t time.Time) { + sp.End() - logger := g.logger.With("name", rule.Name(), "index", i) - ctx, sp := otel.Tracer("").Start(ctx, "rule") - sp.SetAttributes(attribute.String("name", rule.Name())) - defer func(t time.Time) { - sp.End() + since := time.Since(t) + g.metrics.EvalDuration.Observe(since.Seconds()) + rule.SetEvaluationDuration(since) + rule.SetEvaluationTimestamp(t) + }(time.Now()) - since := time.Since(t) - g.metrics.EvalDuration.Observe(since.Seconds()) - rule.SetEvaluationDuration(since) - rule.SetEvaluationTimestamp(t) - }(time.Now()) + if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { + logger = logger.With("trace_id", sp.SpanContext().TraceID()) + } + + g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { - logger = logger.With("trace_id", sp.SpanContext().TraceID()) + vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit()) + if err != nil { + rule.SetHealth(HealthBad) + rule.SetLastError(err) + sp.SetStatus(codes.Error, err.Error()) + g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() + + // Canceled queries are intentional termination of queries. This normally + // happens on shutdown and thus we skip logging of any errors here. + var eqc promql.ErrQueryCanceled + if !errors.As(err, &eqc) { + logger.Warn("Evaluating rule failed", "rule", rule, "err", err) } + return + } + rule.SetHealth(HealthGood) + rule.SetLastError(nil) + samplesTotal.Add(float64(len(vector))) - g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() + if ar, ok := rule.(*AlertingRule); ok { + ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc) + } + var ( + numOutOfOrder = 0 + numTooOld = 0 + numDuplicates = 0 + ) - vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit()) - if err != nil { + app := g.opts.Appendable.Appender(ctx) + seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i])) + defer func() { + if err := app.Commit(); err != nil { rule.SetHealth(HealthBad) rule.SetLastError(err) sp.SetStatus(codes.Error, err.Error()) g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - // Canceled queries are intentional termination of queries. This normally - // happens on shutdown and thus we skip logging of any errors here. 
- var eqc promql.ErrQueryCanceled - if !errors.As(err, &eqc) { - logger.Warn("Evaluating rule failed", "rule", rule, "err", err) - } + logger.Warn("Rule sample appending failed", "err", err) return } - rule.SetHealth(HealthGood) - rule.SetLastError(nil) - samplesTotal.Add(float64(len(vector))) - - if ar, ok := rule.(*AlertingRule); ok { - ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc) + g.seriesInPreviousEval[i] = seriesReturned + }() + + for _, s := range vector { + if s.H != nil { + _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) + } else { + app.SetOptions(g.appOpts) + _, err = app.Append(0, s.Metric, s.T, s.F) } - var ( - numOutOfOrder = 0 - numTooOld = 0 - numDuplicates = 0 - ) - app := g.opts.Appendable.Appender(ctx) - seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i])) - defer func() { - if err := app.Commit(); err != nil { - rule.SetHealth(HealthBad) - rule.SetLastError(err) - sp.SetStatus(codes.Error, err.Error()) - g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - - logger.Warn("Rule sample appending failed", "err", err) - return - } - g.seriesInPreviousEval[i] = seriesReturned - }() - - for _, s := range vector { - if s.H != nil { - _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) - } else { - app.SetOptions(g.appOpts) - _, err = app.Append(0, s.Metric, s.T, s.F) + if err != nil { + rule.SetHealth(HealthBad) + rule.SetLastError(err) + sp.SetStatus(codes.Error, err.Error()) + unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err } - - if err != nil { - rule.SetHealth(HealthBad) - rule.SetLastError(err) - sp.SetStatus(codes.Error, err.Error()) - unwrappedErr := errors.Unwrap(err) - if unwrappedErr == nil { - unwrappedErr = err - } - switch { - case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): - numOutOfOrder++ - logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) - case errors.Is(unwrappedErr, storage.ErrTooOldSample): - numTooOld++ - logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) - case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): - numDuplicates++ - logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) - default: - logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) - } - } else { - buf := [1024]byte{} - seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric + switch { + case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): + numOutOfOrder++ + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) + case errors.Is(unwrappedErr, storage.ErrTooOldSample): + numTooOld++ + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) + case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): + numDuplicates++ + logger.Debug("Rule evaluation result discarded", "err", err, "sample", s) + default: + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) } + } else { + buf := [1024]byte{} + seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric } - if numOutOfOrder > 0 { - logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) - } - if numTooOld > 0 { - logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) - } - if numDuplicates > 0 { - logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) - } + } + if 
numOutOfOrder > 0 { + logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) + } + if numTooOld > 0 { + logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) + } + if numDuplicates > 0 { + logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) + } - for metric, lset := range g.seriesInPreviousEval[i] { - if _, ok := seriesReturned[metric]; !ok { - // Series no longer exposed, mark it stale. - _, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN)) - unwrappedErr := errors.Unwrap(err) - if unwrappedErr == nil { - unwrappedErr = err - } - switch { - case unwrappedErr == nil: - case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), - errors.Is(unwrappedErr, storage.ErrTooOldSample), - errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): - // Do not count these in logging, as this is expected if series - // is exposed from a different rule. - default: - logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err) - } + for metric, lset := range g.seriesInPreviousEval[i] { + if _, ok := seriesReturned[metric]; !ok { + // Series no longer exposed, mark it stale. + _, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN)) + unwrappedErr := errors.Unwrap(err) + if unwrappedErr == nil { + unwrappedErr = err + } + switch { + case unwrappedErr == nil: + case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample), + errors.Is(unwrappedErr, storage.ErrTooOldSample), + errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): + // Do not count these in logging, as this is expected if series + // is exposed from a different rule. + default: + logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err) } } } + } + + var wg sync.WaitGroup + for i, rule := range g.rules { + select { + case <-g.done: + // There's a chance that the group is asked to return early. In that case, we should + // wait for any in-flight rules to finish evaluating before returning so that we can preserve the same semantics. + // At the time of writing, the main reason for this was to make sure we don't clear seriesInPreviousEval before we're done using it. 
+ wg.Wait() + return + default: + } if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) { wg.Add(1) @@ -683,7 +681,6 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { eval(i, rule, nil) } } - wg.Wait() g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal.Load()) diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 4803354cf6f..2da07d719e0 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -450,7 +450,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { switch { case nonEmpty: all = append(all, t) - case !t.discoveredLabels.IsEmpty(): + default: if sp.config.KeepDroppedTargets == 0 || uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets { sp.droppedTargets = append(sp.droppedTargets, t) } @@ -553,9 +553,9 @@ func (sp *scrapePool) sync(targets []*Target) { if _, ok := uniqueLoops[hash]; !ok { uniqueLoops[hash] = nil } - // Need to keep the most updated labels information - // for displaying it in the Service Discovery web page. - sp.activeTargets[hash].SetDiscoveredLabels(t.DiscoveredLabels()) + // Need to keep the most updated ScrapeConfig for + // displaying labels in the Service Discovery web page. + sp.activeTargets[hash].SetScrapeConfig(sp.config, t.tLabels, t.tgLabels) } } diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index 06d4737ff90..d05866f8630 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -45,12 +45,12 @@ const ( // Target refers to a singular HTTP or HTTPS endpoint. type Target struct { - // Labels before any processing. - discoveredLabels labels.Labels // Any labels that are added to this target and its metrics. labels labels.Labels - // Additional URL parameters that are part of the target URL. - params url.Values + // ScrapeConfig used to create this target. + scrapeConfig *config.ScrapeConfig + // Target and TargetGroup labels used to create this target. + tLabels, tgLabels model.LabelSet mtx sync.RWMutex lastError error @@ -61,12 +61,13 @@ type Target struct { } // NewTarget creates a reasonably configured target for querying. -func NewTarget(labels, discoveredLabels labels.Labels, params url.Values) *Target { +func NewTarget(labels labels.Labels, scrapeConfig *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) *Target { return &Target{ - labels: labels, - discoveredLabels: discoveredLabels, - params: params, - health: HealthUnknown, + labels: labels, + tLabels: tLabels, + tgLabels: tgLabels, + scrapeConfig: scrapeConfig, + health: HealthUnknown, } } @@ -168,11 +169,11 @@ func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration } // Labels returns a copy of the set of all public labels of the target. -func (t *Target) Labels(b *labels.ScratchBuilder) labels.Labels { - b.Reset() +func (t *Target) Labels(b *labels.Builder) labels.Labels { + b.Reset(labels.EmptyLabels()) t.labels.Range(func(l labels.Label) { if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) { - b.Add(l.Name, l.Value) + b.Set(l.Name, l.Value) } }) return b.Labels() @@ -188,24 +189,31 @@ func (t *Target) LabelsRange(f func(l labels.Label)) { } // DiscoveredLabels returns a copy of the target's labels before any processing. 
-func (t *Target) DiscoveredLabels() labels.Labels { +func (t *Target) DiscoveredLabels(lb *labels.Builder) labels.Labels { t.mtx.Lock() - defer t.mtx.Unlock() - return t.discoveredLabels.Copy() + cfg, tLabels, tgLabels := t.scrapeConfig, t.tLabels, t.tgLabels + t.mtx.Unlock() + PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels) + return lb.Labels() } -// SetDiscoveredLabels sets new DiscoveredLabels. -func (t *Target) SetDiscoveredLabels(l labels.Labels) { +// SetScrapeConfig sets new ScrapeConfig. +func (t *Target) SetScrapeConfig(scrapeConfig *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) { t.mtx.Lock() defer t.mtx.Unlock() - t.discoveredLabels = l + t.scrapeConfig = scrapeConfig + t.tLabels = tLabels + t.tgLabels = tgLabels } // URL returns a copy of the target's URL. func (t *Target) URL() *url.URL { + t.mtx.Lock() + configParams := t.scrapeConfig.Params + t.mtx.Unlock() params := url.Values{} - for k, v := range t.params { + for k, v := range configParams { params[k] = make([]string, len(v)) copy(params[k], v) } @@ -420,10 +428,19 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels return ref, nil } -// PopulateLabels builds a label set from the given label set and scrape configuration. -// It returns a label set before relabeling was applied as the second return value. -// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { +// PopulateDiscoveredLabels sets base labels on lb from target and group labels and scrape configuration, before relabeling. +func PopulateDiscoveredLabels(lb *labels.Builder, cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) { + lb.Reset(labels.EmptyLabels()) + + for ln, lv := range tLabels { + lb.Set(string(ln), string(lv)) + } + for ln, lv := range tgLabels { + if _, ok := tLabels[ln]; !ok { + lb.Set(string(ln), string(lv)) + } + } + // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -444,44 +461,49 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig lab lb.Set(name, v[0]) } } +} - preRelabelLabels := lb.Labels() +// PopulateLabels builds labels from target and group labels and scrape configuration, +// performs defined relabeling, checks validity, and adds Prometheus standard labels such as 'instance'. +// A return of empty labels and nil error means the target was dropped by relabeling. +func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) (res labels.Labels, err error) { + PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels) keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...) // Check if the target was dropped. 
if !keep { - return labels.EmptyLabels(), preRelabelLabels, nil + return labels.EmptyLabels(), nil } if v := lb.Get(model.AddressLabel); v == "" { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") + return labels.EmptyLabels(), errors.New("no address") } addr := lb.Get(model.AddressLabel) if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), err + return labels.EmptyLabels(), err } interval := lb.Get(model.ScrapeIntervalLabel) intervalDuration, err := model.ParseDuration(interval) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err) + return labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err) } if time.Duration(intervalDuration) == 0 { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0") + return labels.EmptyLabels(), errors.New("scrape interval cannot be 0") } timeout := lb.Get(model.ScrapeTimeoutLabel) timeoutDuration, err := model.ParseDuration(timeout) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err) + return labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err) } if time.Duration(timeoutDuration) == 0 { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0") + return labels.EmptyLabels(), errors.New("scrape timeout cannot be 0") } if timeoutDuration > intervalDuration { - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) + return labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) } // Meta labels are deleted after relabelling. Other internal labels propagate to @@ -506,9 +528,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig lab return nil }) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), err + return labels.EmptyLabels(), err } - return res, preRelabelLabels, nil + return res, nil } // TargetsFromGroup builds targets based on the given TargetGroup and config. 
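Editor's note: the net effect of the PopulateLabels signature change is that callers now pass the raw target and group label sets plus the scrape config, and a dropped target is signaled by empty labels with a nil error instead of a second "pre-relabel labels" return value. A hypothetical caller is sketched below, under the assumption that the config fields used (JobName, Scheme, MetricsPath, ScrapeInterval, ScrapeTimeout) keep their usual meanings in the Prometheus config package; the label values are made up:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/scrape"
)

func main() {
	cfg := &config.ScrapeConfig{
		JobName:        "example",
		Scheme:         "http",
		MetricsPath:    "/metrics",
		ScrapeInterval: model.Duration(15 * time.Second),
		ScrapeTimeout:  model.Duration(10 * time.Second),
	}
	tLabels := model.LabelSet{model.AddressLabel: "localhost:9090"} // per-target labels from service discovery
	tgLabels := model.LabelSet{"env": "dev"}                        // target-group labels

	lb := labels.NewBuilder(labels.EmptyLabels())
	lset, err := scrape.PopulateLabels(lb, cfg, tLabels, tgLabels)
	switch {
	case err != nil:
		fmt.Println("invalid target:", err)
	case lset.IsEmpty():
		fmt.Println("target dropped by relabeling") // empty labels + nil error
	default:
		fmt.Println("final target labels:", lset)
	}
}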
@@ -516,24 +538,12 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets [ targets = targets[:0] failures := []error{} - for i, tlset := range tg.Targets { - lb.Reset(labels.EmptyLabels()) - - for ln, lv := range tlset { - lb.Set(string(ln), string(lv)) - } - for ln, lv := range tg.Labels { - if _, ok := tlset[ln]; !ok { - lb.Set(string(ln), string(lv)) - } - } - - lset, origLabels, err := PopulateLabels(lb, cfg) + for i, tLabels := range tg.Targets { + lset, err := PopulateLabels(lb, cfg, tLabels, tg.Labels) if err != nil { failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) - } - if !lset.IsEmpty() || !origLabels.IsEmpty() { - targets = append(targets, NewTarget(lset, origLabels, cfg.Params)) + } else { + targets = append(targets, NewTarget(lset, cfg, tLabels, tg.Labels)) } } return targets, failures diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go deleted file mode 100644 index cb9257d0737..00000000000 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/helpers_from_stdlib.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Provenance-includes-location: https://github.com/golang/go/blob/f2d118fd5f7e872804a5825ce29797f81a28b0fa/src/strings/strings.go -// Provenance-includes-license: BSD-3-Clause -// Provenance-includes-copyright: Copyright The Go Authors. - -package prometheus - -import "strings" - -// fieldsFunc is a copy of strings.FieldsFunc from the Go standard library, -// but it also returns the separators as part of the result. -func fieldsFunc(s string, f func(rune) bool) ([]string, []string) { - // A span is used to record a slice of s of the form s[start:end]. - // The start index is inclusive and the end index is exclusive. - type span struct { - start int - end int - } - spans := make([]span, 0, 32) - separators := make([]string, 0, 32) - - // Find the field start and end indices. - // Doing this in a separate pass (rather than slicing the string s - // and collecting the result substrings right away) is significantly - // more efficient, possibly due to cache effects. - start := -1 // valid span start if >= 0 - for end, rune := range s { - if f(rune) { - if start >= 0 { - spans = append(spans, span{start, end}) - // Set start to a negative value. - // Note: using -1 here consistently and reproducibly - // slows down this code by a several percent on amd64. - start = ^start - separators = append(separators, string(s[end])) - } - } else { - if start < 0 { - start = end - } - } - } - - // Last field might end at EOF. - if start >= 0 { - spans = append(spans, span{start, len(s)}) - } - - // Create strings from recorded field indices. 
- a := make([]string, len(spans)) - for i, span := range spans { - a[i] = s[span.start:span.end] - } - - return a, separators -} - -// join is a copy of strings.Join from the Go standard library, -// but it also accepts a slice of separators to join the elements with. -// If the slice of separators is shorter than the slice of elements, use a default value. -// We also don't check for integer overflow. -func join(elems []string, separators []string, def string) string { - switch len(elems) { - case 0: - return "" - case 1: - return elems[0] - } - - var n int - var sep string - sepLen := len(separators) - for i, elem := range elems { - if i >= sepLen { - sep = def - } else { - sep = separators[i] - } - n += len(sep) + len(elem) - } - - var b strings.Builder - b.Grow(n) - b.WriteString(elems[0]) - for i, s := range elems[1:] { - if i >= sepLen { - sep = def - } else { - sep = separators[i] - } - b.WriteString(sep) - b.WriteString(s) - } - return b.String() -} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/metric_name_builder.go similarity index 54% rename from vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go rename to vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/metric_name_builder.go index 6967ca013c2..8b5ea2a0464 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/metric_name_builder.go @@ -22,7 +22,6 @@ import ( "strings" "unicode" - "github.com/prometheus/prometheus/util/strutil" "go.opentelemetry.io/collector/pdata/pmetric" ) @@ -79,7 +78,7 @@ var perUnitMap = map[string]string{ "y": "year", } -// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric. +// BuildCompliantMetricName builds a Prometheus-compliant metric name for the specified metric. // // Metric name is prefixed with specified namespace and underscore (if any). // Namespace is not cleaned up. Make sure specified namespace follows Prometheus @@ -88,104 +87,49 @@ var perUnitMap = map[string]string{ // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, // https://prometheus.io/docs/practices/naming/#metric-and-label-naming // and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. -func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes, allowUTF8 bool) string { +func BuildCompliantMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { // Full normalization following standard Prometheus naming conventions if addMetricSuffixes { - return normalizeName(metric, namespace, allowUTF8) + return normalizeName(metric, namespace) } - var metricName string - if !allowUTF8 { - // Regexp for metric name characters that should be replaced with _. - invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`) - - // Simple case (no full normalization, no units, etc.). - metricName = strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { - return invalidMetricCharRE.MatchString(string(r)) - }), "_") - } else { - metricName = metric.Name() - } + // Simple case (no full normalization, no units, etc.). 
+ metricName := strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool { + return invalidMetricCharRE.MatchString(string(r)) + }), "_") // Namespace? if namespace != "" { return namespace + "_" + metricName } - // Metric name starts with a digit and utf8 not allowed? Prefix it with an underscore. - if metricName != "" && unicode.IsDigit(rune(metricName[0])) && !allowUTF8 { + // Metric name starts with a digit? Prefix it with an underscore. + if metricName != "" && unicode.IsDigit(rune(metricName[0])) { metricName = "_" + metricName } return metricName } -// Build a normalized name for the specified metric. -func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string { - var nameTokens []string - var separators []string - if !allowUTF8 { - nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`) - // Split metric name into "tokens" (of supported metric name runes). - // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. - // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. - nameTokens = strings.FieldsFunc( - metric.Name(), - func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) }, - ) - } else { - translationFunc := func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' } - // Split metric name into "tokens" (of supported metric name runes). - nameTokens, separators = fieldsFunc(metric.Name(), translationFunc) - } - - // Split unit at the '/' if any - unitTokens := strings.SplitN(metric.Unit(), "/", 2) - - // Main unit - // Append if not blank, doesn't contain '{}', and is not present in metric name already - if len(unitTokens) > 0 { - var mainUnitProm, perUnitProm string - mainUnitOTel := strings.TrimSpace(unitTokens[0]) - if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { - mainUnitProm = unitMapGetOrDefault(mainUnitOTel) - if !allowUTF8 { - mainUnitProm = cleanUpUnit(mainUnitProm) - } - if slices.Contains(nameTokens, mainUnitProm) { - mainUnitProm = "" - } - } - - // Per unit - // Append if not blank, doesn't contain '{}', and is not present in metric name already - if len(unitTokens) > 1 && unitTokens[1] != "" { - perUnitOTel := strings.TrimSpace(unitTokens[1]) - if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { - perUnitProm = perUnitMapGetOrDefault(perUnitOTel) - if !allowUTF8 { - perUnitProm = cleanUpUnit(perUnitProm) - } - } - if perUnitProm != "" { - perUnitProm = "per_" + perUnitProm - if slices.Contains(nameTokens, perUnitProm) { - perUnitProm = "" - } - } - } +var ( + nonMetricNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9:]`) + // Regexp for metric name characters that should be replaced with _. + invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`) + multipleUnderscoresRE = regexp.MustCompile(`__+`) +) - if perUnitProm != "" { - mainUnitProm = strings.TrimSuffix(mainUnitProm, "_") - } +// Build a normalized name for the specified metric. +func normalizeName(metric pmetric.Metric, namespace string) string { + // Split metric name into "tokens" (of supported metric name runes). + // Note that this has the side effect of replacing multiple consecutive underscores with a single underscore. 
+ // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. + nameTokens := strings.FieldsFunc( + metric.Name(), + func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) }, + ) - if mainUnitProm != "" { - nameTokens = append(nameTokens, mainUnitProm) - } - if perUnitProm != "" { - nameTokens = append(nameTokens, perUnitProm) - } - } + mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit()) + nameTokens = addUnitTokens(nameTokens, cleanUpUnit(mainUnitSuffix), cleanUpUnit(perUnitSuffix)) // Append _total for Counters if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { @@ -206,14 +150,8 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri nameTokens = append([]string{namespace}, nameTokens...) } - var normalizedName string - if !allowUTF8 { - // Build the string from the tokens, separated with underscores - normalizedName = strings.Join(nameTokens, "_") - } else { - // Build the string from the tokens + separators. - normalizedName = join(nameTokens, separators, "_") - } + // Build the string from the tokens, separated with underscores + normalizedName := strings.Join(nameTokens, "_") // Metric name cannot start with a digit, so prefix it with "_" in this case if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) { @@ -223,73 +161,45 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri return normalizedName } -// TrimPromSuffixes trims type and unit prometheus suffixes from a metric name. -// Following the [OpenTelemetry specs] for converting Prometheus Metric points to OTLP. +// addUnitTokens will add the suffixes to the nameTokens if they are not already present. +// It will also remove trailing underscores from the main suffix to avoid double underscores +// when joining the tokens. // -// [OpenTelemetry specs]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#metric-metadata -func TrimPromSuffixes(promName string, metricType pmetric.MetricType, unit string) string { - nameTokens := strings.Split(promName, "_") - if len(nameTokens) == 1 { - return promName +// If the 'per' unit ends with underscore, the underscore will be removed. If the per unit is just +// 'per_', it will be entirely removed. +func addUnitTokens(nameTokens []string, mainUnitSuffix, perUnitSuffix string) []string { + if slices.Contains(nameTokens, mainUnitSuffix) { + mainUnitSuffix = "" } - nameTokens = removeTypeSuffixes(nameTokens, metricType) - nameTokens = removeUnitSuffixes(nameTokens, unit) - - return strings.Join(nameTokens, "_") -} - -func removeTypeSuffixes(tokens []string, metricType pmetric.MetricType) []string { - switch metricType { - case pmetric.MetricTypeSum: - // Only counters are expected to have a type suffix at this point. - // for other types, suffixes are removed during scrape. 
- return removeSuffix(tokens, "total") - default: - return tokens + if perUnitSuffix == "per_" { + perUnitSuffix = "" + } else { + perUnitSuffix = strings.TrimSuffix(perUnitSuffix, "_") + if slices.Contains(nameTokens, perUnitSuffix) { + perUnitSuffix = "" + } } -} - -func removeUnitSuffixes(nameTokens []string, unit string) []string { - l := len(nameTokens) - unitTokens := strings.Split(unit, "_") - lu := len(unitTokens) - if lu == 0 || l <= lu { - return nameTokens + if perUnitSuffix != "" { + mainUnitSuffix = strings.TrimSuffix(mainUnitSuffix, "_") } - suffixed := true - for i := range unitTokens { - if nameTokens[l-i-1] != unitTokens[lu-i-1] { - suffixed = false - break - } + if mainUnitSuffix != "" { + nameTokens = append(nameTokens, mainUnitSuffix) } - - if suffixed { - return nameTokens[:l-lu] + if perUnitSuffix != "" { + nameTokens = append(nameTokens, perUnitSuffix) } - return nameTokens } -func removeSuffix(tokens []string, suffix string) []string { - l := len(tokens) - if tokens[l-1] == suffix { - return tokens[:l-1] - } - - return tokens -} - // cleanUpUnit cleans up unit so it matches model.LabelNameRE. func cleanUpUnit(unit string) string { // Multiple consecutive underscores are replaced with a single underscore. // This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. - multipleUnderscoresRE := regexp.MustCompile(`__+`) return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString( - strutil.SanitizeLabelName(unit), + nonMetricNameCharRE.ReplaceAllString(unit, "_"), "_", ), "_") } @@ -322,3 +232,75 @@ func removeItem(slice []string, value string) []string { } return newSlice } + +// BuildMetricName builds a valid metric name but without following Prometheus naming conventions. +// It doesn't do any character transformation, it only prefixes the metric name with the namespace, if any, +// and adds metric type suffixes, e.g. "_total" for counters and unit suffixes. +// +// Differently from BuildCompliantMetricName, it doesn't check for the presence of unit and type suffixes. +// If "addMetricSuffixes" is true, it will add them anyway. +// +// Please use BuildCompliantMetricName for a metric name that follows Prometheus naming conventions. 
+func BuildMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { + metricName := metric.Name() + + if namespace != "" { + metricName = namespace + "_" + metricName + } + + if addMetricSuffixes { + mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit()) + if mainUnitSuffix != "" { + metricName = metricName + "_" + mainUnitSuffix + } + if perUnitSuffix != "" { + metricName = metricName + "_" + perUnitSuffix + } + + // Append _total for Counters + if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() { + metricName = metricName + "_total" + } + + // Append _ratio for metrics with unit "1" + // Some OTel receivers improperly use unit "1" for counters of objects + // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions + // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY + // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) + if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge { + metricName = metricName + "_ratio" + } + } + return metricName +} + +// buildUnitSuffixes builds the main and per unit suffixes for the specified unit +// but doesn't do any special character transformation to accommodate Prometheus naming conventions. +// Removing trailing underscores or appending suffixes is done in the caller. +func buildUnitSuffixes(unit string) (mainUnitSuffix, perUnitSuffix string) { + // Split unit at the '/' if any + unitTokens := strings.SplitN(unit, "/", 2) + + if len(unitTokens) > 0 { + // Main unit + // Update if not blank and doesn't contain '{}' + mainUnitOTel := strings.TrimSpace(unitTokens[0]) + if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { + mainUnitSuffix = unitMapGetOrDefault(mainUnitOTel) + } + + // Per unit + // Update if not blank and doesn't contain '{}' + if len(unitTokens) > 1 && unitTokens[1] != "" { + perUnitOTel := strings.TrimSpace(unitTokens[1]) + if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { + perUnitSuffix = perUnitMapGetOrDefault(perUnitOTel) + } + if perUnitSuffix != "" { + perUnitSuffix = "per_" + perUnitSuffix + } + } + } + + return mainUnitSuffix, perUnitSuffix +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 65fd0800476..aa2378d5d77 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -111,7 +111,12 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric continue } - promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8) + var promName string + if settings.AllowUTF8 { + promName = prometheustranslator.BuildMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + } else { + promName = prometheustranslator.BuildCompliantMetricName(metric, settings.Namespace, settings.AddMetricSuffixes) + } c.metadata = append(c.metadata, prompb.MetricMetadata{ Type: otelMetricTypeToPromMetricType(metric), MetricFamilyName: promName, diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go 
b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 49ec44dc086..487de25fecc 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -48,6 +48,8 @@ type writeHandler struct { samplesAppendedWithoutMetadata prometheus.Counter acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{} + + ingestCTZeroSample bool } const maxAheadTime = 10 * time.Minute @@ -57,7 +59,7 @@ const maxAheadTime = 10 * time.Minute // // NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible // as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write. -func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { +func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg, ingestCTZeroSample bool) http.Handler { protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} for _, acc := range acceptedProtoMsgs { protoMsgs[acc] = struct{}{} @@ -78,6 +80,8 @@ func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable Name: "remote_write_without_metadata_appended_samples_total", Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.", }), + + ingestCTZeroSample: ingestCTZeroSample, } return h } @@ -394,6 +398,17 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * var ref storage.SeriesRef // Samples. + if h.ingestCTZeroSample && len(ts.Samples) > 0 && ts.Samples[0].Timestamp != 0 && ts.CreatedTimestamp != 0 { + // CT only needs to be ingested for the first sample, it will be considered + // out of order for the rest. + ref, err = app.AppendCTZeroSample(ref, ls, ts.Samples[0].Timestamp, ts.CreatedTimestamp) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. + h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Samples[0].Timestamp) + } + } for _, s := range ts.Samples { ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue()) if err == nil { @@ -415,6 +430,17 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Native Histograms. for _, hp := range ts.Histograms { + if h.ingestCTZeroSample && hp.Timestamp != 0 && ts.CreatedTimestamp != 0 { + // Differently from samples, we need to handle CT for each histogram instead of just the first one. + // This is because histograms and float histograms are stored separately, even if they have the same labels. + ref, err = h.handleHistogramZeroSample(app, ref, ls, hp, ts.CreatedTimestamp) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { + // Even for the first sample OOO is a common scenario because + // we can't tell if a CT was already ingested in a previous request. + // We ignore the error. 
+ h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", hp.Timestamp) + } + } if hp.IsFloatHistogram() { ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram()) } else { @@ -479,6 +505,18 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...) } +// handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp. +// It doens't return errors in case of out of order CT. +func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) { + var err error + if hist.IsFloatHistogram() { + ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, nil, hist.ToFloatHistogram()) + } else { + ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, hist.ToIntHistogram(), nil) + } + return ref, err +} + // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config, enableCTZeroIngestion bool, validIntervalCTZeroIngestion time.Duration) http.Handler { diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 0698c6c8ac7..25b65eb577f 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -30,6 +30,8 @@ import ( "github.com/grafana/regexp" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "golang.org/x/text/cases" + "golang.org/x/text/language" common_templates "github.com/prometheus/common/helpers/templates" @@ -166,7 +168,7 @@ func NewTemplateExpander( return html_template.HTML(text) }, "match": regexp.MatchString, - "title": strings.Title, //nolint:staticcheck // TODO(beorn7): Need to come up with a replacement using the cases package. 
+ "title": cases.Title(language.AmericanEnglish, cases.NoLower).String, "toUpper": strings.ToUpper, "toLower": strings.ToLower, "graphLink": strutil.GraphLinkForExpression, diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 9d97420f0c0..5ec576b6a7f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -2446,7 +2446,7 @@ func isTmpDir(fi fs.DirEntry) bool { fn := fi.Name() ext := filepath.Ext(fn) if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix || ext == tmpLegacy { - if strings.HasPrefix(fn, "checkpoint.") { + if strings.HasPrefix(fn, wlog.CheckpointPrefix) { return true } if strings.HasPrefix(fn, chunkSnapshotPrefix) { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index a4def2bc918..eb6a2592bf4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -947,17 +947,37 @@ func (a *headAppender) log() error { } } if len(a.histograms) > 0 { - rec = enc.HistogramSamples(a.histograms, buf) + var customBucketsHistograms []record.RefHistogramSample + rec, customBucketsHistograms = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log histograms: %w", err) + if len(rec) > 0 { + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log histograms: %w", err) + } + } + + if len(customBucketsHistograms) > 0 { + rec = enc.CustomBucketsHistogramSamples(customBucketsHistograms, buf) + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log custom buckets histograms: %w", err) + } } } if len(a.floatHistograms) > 0 { - rec = enc.FloatHistogramSamples(a.floatHistograms, buf) + var customBucketsFloatHistograms []record.RefFloatHistogramSample + rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(a.floatHistograms, buf) buf = rec[:0] - if err := a.head.wal.Log(rec); err != nil { - return fmt.Errorf("log float histograms: %w", err) + if len(rec) > 0 { + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log float histograms: %w", err) + } + } + + if len(customBucketsFloatHistograms) > 0 { + rec = enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, buf) + if err := a.head.wal.Log(rec); err != nil { + return fmt.Errorf("log custom buckets float histograms: %w", err) + } } } // Exemplars should be logged after samples (float/native histogram/etc), @@ -1074,12 +1094,24 @@ func (acc *appenderCommitContext) collectOOORecords(a *headAppender) { acc.oooRecords = append(acc.oooRecords, r) } if len(acc.wblHistograms) > 0 { - r := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) - acc.oooRecords = append(acc.oooRecords, r) + r, customBucketsHistograms := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) + if len(r) > 0 { + acc.oooRecords = append(acc.oooRecords, r) + } + if len(customBucketsHistograms) > 0 { + r := acc.enc.CustomBucketsHistogramSamples(customBucketsHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } } if len(acc.wblFloatHistograms) > 0 { - r := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer()) - acc.oooRecords = append(acc.oooRecords, r) + r, customBucketsFloatHistograms := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, 
a.head.getBytesBuffer()) + if len(r) > 0 { + acc.oooRecords = append(acc.oooRecords, r) + } + if len(customBucketsFloatHistograms) > 0 { + r := acc.enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } } acc.wblSamples = nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index a3cd7b653d1..675639db0b0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -103,20 +103,7 @@ func (h *headIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Ma // Postings returns the postings list iterator for the label pairs. func (h *headIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { - switch len(values) { - case 0: - return index.EmptyPostings(), nil - case 1: - return h.head.postings.Get(name, values[0]), nil - default: - res := make([]index.Postings, 0, len(values)) - for _, value := range values { - if p := h.head.postings.Get(name, value); !index.IsEmptyPostingsType(p) { - res = append(res, p) - } - } - return index.Merge(ctx, res...), nil - } + return h.head.postings.Postings(ctx, name, values...), nil } func (h *headIndexReader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 5b1a868837a..b255a969609 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -188,7 +188,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- exemplars - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: hists := histogramsPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { @@ -200,7 +200,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decoded <- hists - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: hists := floatHistogramsPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { @@ -729,7 +729,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- markers - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: hists := histogramSamplesPool.Get()[:0] hists, err = dec.HistogramSamples(rec, hists) if err != nil { @@ -741,7 +741,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch return } decodedCh <- hists - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: hists := floatHistogramSamplesPool.Get()[:0] hists, err = dec.FloatHistogramSamples(rec, hists) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 3e550ed5e65..18447f0b6fc 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -235,25 +235,9 @@ func (p *MemPostings) Stats(label string, limit int, labelSizeFunc func(string, } } -// Get 
returns a postings list for the given label pair. -func (p *MemPostings) Get(name, value string) Postings { - var lp []storage.SeriesRef - p.mtx.RLock() - l := p.m[name] - if l != nil { - lp = l[value] - } - p.mtx.RUnlock() - - if lp == nil { - return EmptyPostings() - } - return newListPostings(lp...) -} - // All returns a postings list over all documents ever added. func (p *MemPostings) All() Postings { - return p.Get(AllPostingsKey()) + return p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value) } // EnsureOrder ensures that all postings lists are sorted. After it returns all further @@ -490,7 +474,7 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, } // Now `vals` only contains the values that matched, get their postings. - its := make([]Postings, 0, len(vals)) + its := make([]*ListPostings, 0, len(vals)) lps := make([]ListPostings, len(vals)) p.mtx.RLock() e := p.m[name] @@ -510,11 +494,27 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string, return Merge(ctx, its...) } +// Postings returns a postings iterator for the given label values. +func (p *MemPostings) Postings(ctx context.Context, name string, values ...string) Postings { + res := make([]*ListPostings, 0, len(values)) + lps := make([]ListPostings, len(values)) + p.mtx.RLock() + postingsMapForName := p.m[name] + for i, value := range values { + if lp := postingsMapForName[value]; lp != nil { + lps[i] = ListPostings{list: lp} + res = append(res, &lps[i]) + } + } + p.mtx.RUnlock() + return Merge(ctx, res...) +} + func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string) Postings { p.mtx.RLock() e := p.m[name] - its := make([]Postings, 0, len(e)) + its := make([]*ListPostings, 0, len(e)) lps := make([]ListPostings, len(e)) i := 0 for _, refs := range e { @@ -660,7 +660,7 @@ func (it *intersectPostings) Err() error { } // Merge returns a new iterator over the union of the input iterators. -func Merge(_ context.Context, its ...Postings) Postings { +func Merge[T Postings](_ context.Context, its ...T) Postings { if len(its) == 0 { return EmptyPostings() } @@ -675,19 +675,19 @@ func Merge(_ context.Context, its ...Postings) Postings { return p } -type mergedPostings struct { - p []Postings - h *loser.Tree[storage.SeriesRef, Postings] +type mergedPostings[T Postings] struct { + p []T + h *loser.Tree[storage.SeriesRef, T] cur storage.SeriesRef } -func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) { +func newMergedPostings[T Postings](p []T) (m *mergedPostings[T], nonEmpty bool) { const maxVal = storage.SeriesRef(math.MaxUint64) // This value must be higher than all real values used in the tree. 
lt := loser.New(p, maxVal) - return &mergedPostings{p: p, h: lt}, true + return &mergedPostings[T]{p: p, h: lt}, true } -func (it *mergedPostings) Next() bool { +func (it *mergedPostings[T]) Next() bool { for { if !it.h.Next() { return false @@ -701,7 +701,7 @@ func (it *mergedPostings) Next() bool { } } -func (it *mergedPostings) Seek(id storage.SeriesRef) bool { +func (it *mergedPostings[T]) Seek(id storage.SeriesRef) bool { for !it.h.IsEmpty() && it.h.At() < id { finished := !it.h.Winner().Seek(id) it.h.Fix(finished) @@ -713,11 +713,11 @@ func (it *mergedPostings) Seek(id storage.SeriesRef) bool { return true } -func (it mergedPostings) At() storage.SeriesRef { +func (it mergedPostings[T]) At() storage.SeriesRef { return it.cur } -func (it mergedPostings) Err() error { +func (it mergedPostings[T]) Err() error { for _, p := range it.p { if err := p.Err(); err != nil { return err diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 784d0b23d7c..4d2a52b9af1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -52,6 +52,10 @@ const ( HistogramSamples Type = 7 // FloatHistogramSamples is used to match WAL records of type Float Histograms. FloatHistogramSamples Type = 8 + // CustomBucketsHistogramSamples is used to match WAL records of type Histogram with custom buckets. + CustomBucketsHistogramSamples Type = 9 + // CustomBucketsFloatHistogramSamples is used to match WAL records of type Float Histogram with custom buckets. + CustomBucketsFloatHistogramSamples Type = 10 ) func (rt Type) String() string { @@ -68,6 +72,10 @@ func (rt Type) String() string { return "histogram_samples" case FloatHistogramSamples: return "float_histogram_samples" + case CustomBucketsHistogramSamples: + return "custom_buckets_histogram_samples" + case CustomBucketsFloatHistogramSamples: + return "custom_buckets_float_histogram_samples" case MmapMarkers: return "mmapmarkers" case Metadata: @@ -207,7 +215,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples: + case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketsHistogramSamples, CustomBucketsFloatHistogramSamples: return t } return Unknown @@ -428,7 +436,7 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != HistogramSamples { + if t != HistogramSamples && t != CustomBucketsHistogramSamples { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -505,12 +513,22 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) { for i := range h.NegativeBuckets { h.NegativeBuckets[i] = buf.Varint64() } + + if histogram.IsCustomBucketsSchema(h.Schema) { + l = buf.Uvarint() + if l > 0 { + h.CustomValues = make([]float64, l) + } + for i := range h.CustomValues { + h.CustomValues[i] = buf.Be64Float64() + } + } } func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) { dec := encoding.Decbuf{B: rec} t := Type(dec.Byte()) - if t != FloatHistogramSamples { + if t != 
FloatHistogramSamples && t != CustomBucketsFloatHistogramSamples { return nil, errors.New("invalid record type") } if dec.Len() == 0 { @@ -587,6 +605,16 @@ func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) { for i := range fh.NegativeBuckets { fh.NegativeBuckets[i] = buf.Be64Float64() } + + if histogram.IsCustomBucketsSchema(fh.Schema) { + l = buf.Uvarint() + if l > 0 { + fh.CustomValues = make([]float64, l) + } + for i := range fh.CustomValues { + fh.CustomValues[i] = buf.Be64Float64() + } + } } // Encoder encodes series, sample, and tombstones records. @@ -716,10 +744,44 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { return buf.Get() } -func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []byte { +func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []RefHistogramSample) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(HistogramSamples)) + if len(histograms) == 0 { + return buf.Get(), nil + } + var customBucketHistograms []RefHistogramSample + + // Store base timestamp and base reference number of first histogram. + // All histograms encode their timestamp and ref as delta to those. + first := histograms[0] + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, h := range histograms { + if h.H.UsesCustomBuckets() { + customBucketHistograms = append(customBucketHistograms, h) + continue + } + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeHistogram(&buf, h.H) + } + + // Reset buffer if only custom bucket histograms existed in list of histogram samples. + if len(histograms) == len(customBucketHistograms) { + buf.Reset() + } + + return buf.Get(), customBucketHistograms +} + +func (e *Encoder) CustomBucketsHistogramSamples(histograms []RefHistogramSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(CustomBucketsHistogramSamples)) + if len(histograms) == 0 { return buf.Get() } @@ -772,12 +834,54 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) { for _, b := range h.NegativeBuckets { buf.PutVarint64(b) } + + if histogram.IsCustomBucketsSchema(h.Schema) { + buf.PutUvarint(len(h.CustomValues)) + for _, v := range h.CustomValues { + buf.PutBEFloat64(v) + } + } } -func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { +func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []RefFloatHistogramSample) { buf := encoding.Encbuf{B: b} buf.PutByte(byte(FloatHistogramSamples)) + if len(histograms) == 0 { + return buf.Get(), nil + } + + var customBucketsFloatHistograms []RefFloatHistogramSample + + // Store base timestamp and base reference number of first histogram. + // All histograms encode their timestamp and ref as delta to those. 
+ first := histograms[0] + buf.PutBE64(uint64(first.Ref)) + buf.PutBE64int64(first.T) + + for _, h := range histograms { + if h.FH.UsesCustomBuckets() { + customBucketsFloatHistograms = append(customBucketsFloatHistograms, h) + continue + } + buf.PutVarint64(int64(h.Ref) - int64(first.Ref)) + buf.PutVarint64(h.T - first.T) + + EncodeFloatHistogram(&buf, h.FH) + } + + // Reset buffer if only custom bucket histograms existed in list of histogram samples. + if len(histograms) == len(customBucketsFloatHistograms) { + buf.Reset() + } + + return buf.Get(), customBucketsFloatHistograms +} + +func (e *Encoder) CustomBucketsFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(CustomBucketsFloatHistogramSamples)) + if len(histograms) == 0 { return buf.Get() } @@ -830,4 +934,11 @@ func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) { for _, b := range h.NegativeBuckets { buf.PutBEFloat64(b) } + + if histogram.IsCustomBucketsSchema(h.Schema) { + buf.PutUvarint(len(h.CustomValues)) + for _, v := range h.CustomValues { + buf.PutBEFloat64(v) + } + } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/testutil.go b/vendor/github.com/prometheus/prometheus/tsdb/testutil.go index c39eb133c7f..e957b0307b6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/testutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/testutil.go @@ -29,11 +29,13 @@ import ( ) const ( - float = "float" - intHistogram = "integer histogram" - floatHistogram = "float histogram" - gaugeIntHistogram = "gauge int histogram" - gaugeFloatHistogram = "gauge float histogram" + float = "float" + intHistogram = "integer histogram" + floatHistogram = "float histogram" + customBucketsIntHistogram = "custom buckets int histogram" + customBucketsFloatHistogram = "custom buckets float histogram" + gaugeIntHistogram = "gauge int histogram" + gaugeFloatHistogram = "gauge float histogram" ) type testValue struct { @@ -63,45 +65,67 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{ intHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))} + s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(value)} ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) return ref, s, err }, sampleFunc: func(ts, value int64) sample { - return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))} + return sample{t: ts, h: tsdbutil.GenerateTestHistogram(value)} }, }, floatHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} + s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)} ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) return ref, s, err }, sampleFunc: func(ts, value int64) sample { - return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))} + return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)} + }, + }, + customBucketsIntHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)} + ref, err :=
appender.AppendHistogram(0, lbls, ts, s.h, nil) + return ref, s, err + }, + sampleFunc: func(ts, value int64) sample { + return sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)} + }, + }, + customBucketsFloatHistogram: { + sampleType: sampleMetricTypeHistogram, + appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { + s := sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)} + ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) + return ref, s, err + }, + sampleFunc: func(ts, value int64) sample { + return sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)} }, }, gaugeIntHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))} + s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(value)} ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil) return ref, s, err }, sampleFunc: func(ts, value int64) sample { - return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))} + return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(value)} }, }, gaugeFloatHistogram: { sampleType: sampleMetricTypeHistogram, appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) { - s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))} + s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(value)} ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh) return ref, s, err }, sampleFunc: func(ts, value int64) sample { - return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))} + return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(value)} }, }, } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go index ce934a638d9..a923519ef77 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go @@ -21,7 +21,7 @@ import ( func GenerateTestHistograms(n int) (r []*histogram.Histogram) { for i := 0; i < n; i++ { - h := GenerateTestHistogram(i) + h := GenerateTestHistogram(int64(i)) if i > 0 { h.CounterResetHint = histogram.NotCounterReset } @@ -31,13 +31,13 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) { } func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram { - h := GenerateTestHistogram(n) + h := GenerateTestHistogram(int64(n)) h.CounterResetHint = hint return h } // GenerateTestHistogram but it is up to the user to set any known counter reset hint. 
-func GenerateTestHistogram(i int) *histogram.Histogram { +func GenerateTestHistogram(i int64) *histogram.Histogram { return &histogram.Histogram{ Count: 12 + uint64(i*9), ZeroCount: 2 + uint64(i), @@ -48,16 +48,27 @@ func GenerateTestHistogram(i int) *histogram.Histogram { {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - PositiveBuckets: []int64{int64(i + 1), 1, -1, 0}, + PositiveBuckets: []int64{i + 1, 1, -1, 0}, NegativeSpans: []histogram.Span{ {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - NegativeBuckets: []int64{int64(i + 1), 1, -1, 0}, + NegativeBuckets: []int64{i + 1, 1, -1, 0}, } } -func GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram { +func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) { + for i := 0; i < n; i++ { + h := GenerateTestCustomBucketsHistogram(int64(i)) + if i > 0 { + h.CounterResetHint = histogram.NotCounterReset + } + r = append(r, h) + } + return r +} + +func GenerateTestCustomBucketsHistogram(i int64) *histogram.Histogram { return &histogram.Histogram{ Count: 5 + uint64(i*4), Sum: 18.4 * float64(i+1), @@ -66,20 +77,20 @@ func GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram { {Offset: 0, Length: 2}, {Offset: 1, Length: 2}, }, - PositiveBuckets: []int64{int64(i + 1), 1, -1, 0}, + PositiveBuckets: []int64{i + 1, 1, -1, 0}, CustomValues: []float64{0, 1, 2, 3, 4}, } } func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) { for x := 0; x < n; x++ { - i := int(math.Sin(float64(x))*100) + 100 + i := int64(math.Sin(float64(x))*100) + 100 r = append(r, GenerateTestGaugeHistogram(i)) } return r } -func GenerateTestGaugeHistogram(i int) *histogram.Histogram { +func GenerateTestGaugeHistogram(i int64) *histogram.Histogram { h := GenerateTestHistogram(i) h.CounterResetHint = histogram.GaugeType return h @@ -87,7 +98,7 @@ func GenerateTestGaugeHistogram(i int) *histogram.Histogram { func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) { for i := 0; i < n; i++ { - h := GenerateTestFloatHistogram(i) + h := GenerateTestFloatHistogram(int64(i)) if i > 0 { h.CounterResetHint = histogram.NotCounterReset } @@ -97,7 +108,7 @@ func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) { } // GenerateTestFloatHistogram but it is up to the user to set any known counter reset hint. 
-func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { +func GenerateTestFloatHistogram(i int64) *histogram.FloatHistogram { return &histogram.FloatHistogram{ Count: 12 + float64(i*9), ZeroCount: 2 + float64(i), @@ -117,7 +128,18 @@ func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram { } } -func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram { +func GenerateTestCustomBucketsFloatHistograms(n int) (r []*histogram.FloatHistogram) { + for i := 0; i < n; i++ { + h := GenerateTestCustomBucketsFloatHistogram(int64(i)) + if i > 0 { + h.CounterResetHint = histogram.NotCounterReset + } + r = append(r, h) + } + return r +} + +func GenerateTestCustomBucketsFloatHistogram(i int64) *histogram.FloatHistogram { return &histogram.FloatHistogram{ Count: 5 + float64(i*4), Sum: 18.4 * float64(i+1), @@ -133,13 +155,13 @@ func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram { func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) { for x := 0; x < n; x++ { - i := int(math.Sin(float64(x))*100) + 100 + i := int64(math.Sin(float64(x))*100) + 100 r = append(r, GenerateTestGaugeFloatHistogram(i)) } return r } -func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram { +func GenerateTestGaugeFloatHistogram(i int64) *histogram.FloatHistogram { h := GenerateTestFloatHistogram(i) h.CounterResetHint = histogram.GaugeType return h diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go index 58e11c770e0..5c607d70302 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go @@ -81,7 +81,8 @@ func DeleteCheckpoints(dir string, maxIndex int) error { return errs.Err() } -const checkpointPrefix = "checkpoint." +// CheckpointPrefix is the prefix used for checkpoint files. +const CheckpointPrefix = "checkpoint." // Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL. // It includes the most recent checkpoint if it exists. @@ -221,11 +222,27 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf = enc.HistogramSamples(repl, buf) + buf, _ = enc.HistogramSamples(repl, buf) + } + stats.TotalSamples += len(histogramSamples) + stats.DroppedSamples += len(histogramSamples) - len(repl) + case record.CustomBucketsHistogramSamples: + histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) + if err != nil { + return nil, fmt.Errorf("decode histogram samples: %w", err) + } + // Drop irrelevant histogramSamples in place. 
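+ // repl is a zero-length reslice of histogramSamples, so samples at or after mint are kept in the same backing array without any new allocation.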
+ repl := histogramSamples[:0] + for _, h := range histogramSamples { + if h.T >= mint { + repl = append(repl, h) + } + } + if len(repl) > 0 { + buf = enc.CustomBucketsHistogramSamples(repl, buf) } stats.TotalSamples += len(histogramSamples) stats.DroppedSamples += len(histogramSamples) - len(repl) - case record.FloatHistogramSamples: floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) if err != nil { @@ -239,11 +256,27 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } } if len(repl) > 0 { - buf = enc.FloatHistogramSamples(repl, buf) + buf, _ = enc.FloatHistogramSamples(repl, buf) + } + stats.TotalSamples += len(floatHistogramSamples) + stats.DroppedSamples += len(floatHistogramSamples) - len(repl) + case record.CustomBucketsFloatHistogramSamples: + floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples) + if err != nil { + return nil, fmt.Errorf("decode float histogram samples: %w", err) + } + // Drop irrelevant floatHistogramSamples in place. + repl := floatHistogramSamples[:0] + for _, fh := range floatHistogramSamples { + if fh.T >= mint { + repl = append(repl, fh) + } + } + if len(repl) > 0 { + buf = enc.CustomBucketsFloatHistogramSamples(repl, buf) } stats.TotalSamples += len(floatHistogramSamples) stats.DroppedSamples += len(floatHistogramSamples) - len(repl) - case record.Tombstones: tstones, err = dec.Tombstones(rec, tstones) if err != nil { @@ -363,7 +396,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He } func checkpointDir(dir string, i int) string { - return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i)) + return filepath.Join(dir, fmt.Sprintf(CheckpointPrefix+"%08d", i)) } type checkpointRef struct { @@ -379,13 +412,13 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) { for i := 0; i < len(files); i++ { fi := files[i] - if !strings.HasPrefix(fi.Name(), checkpointPrefix) { + if !strings.HasPrefix(fi.Name(), CheckpointPrefix) { continue } if !fi.IsDir() { return nil, fmt.Errorf("checkpoint %s is not a directory", fi.Name()) } - idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) + idx, err := strconv.Atoi(fi.Name()[len(CheckpointPrefix):]) if err != nil { continue } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index 89db5d2dd72..6f1bc1df35a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -546,7 +546,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { } w.writer.AppendExemplars(exemplars) - case record.HistogramSamples: + case record.HistogramSamples, record.CustomBucketsHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. if !w.sendHistograms { break @@ -574,7 +574,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { histogramsToSend = histogramsToSend[:0] } - case record.FloatHistogramSamples: + case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. 
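// Custom-bucket float histogram records share this decode path: Decoder.FloatHistogramSamples accepts either record type byte, so no separate branch is needed here.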
if !w.sendHistograms { break diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go index 1b743f70576..5b2fde152bd 100644 --- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go +++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -148,6 +148,7 @@ var ( HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo) + HistogramIgnoredInMixedRangeInfo = fmt.Errorf("%w: ignored histograms in a range containing both floats and histograms for metric name", PromQLInfo) ) type annoErr struct { @@ -293,3 +294,10 @@ func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.Posit Err: fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation), } } + +func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName), + } +} diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/context.go b/vendor/github.com/prometheus/prometheus/util/testutil/context.go index 0c9e0f6f649..ea4b0e3746b 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/context.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/context.go @@ -49,8 +49,8 @@ func (c *MockContext) Value(interface{}) interface{} { // MockContextErrAfter is a MockContext that will return an error after a certain // number of calls to Err(). 
type MockContextErrAfter struct { + count atomic.Uint64 MockContext - count atomic.Uint64 FailAfter uint64 } diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 10c2ba2e0a4..caba3900f55 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -258,7 +258,7 @@ func NewAPI( rwEnabled bool, acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg, otlpEnabled bool, - enableCTZeroIngestion bool, + ctZeroIngestionEnabled bool, validIntervalCTZeroIngestion time.Duration, ) *API { a := &API{ @@ -303,10 +303,10 @@ func NewAPI( } if rwEnabled { - a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs) + a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled) } if otlpEnabled { - a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap, configFunc, enableCTZeroIngestion, validIntervalCTZeroIngestion) + a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap, configFunc, ctZeroIngestionEnabled, validIntervalCTZeroIngestion) } return a @@ -1085,12 +1085,12 @@ func (api *API) targets(r *http.Request) apiFuncResult { showActive := state == "" || state == "any" || state == "active" showDropped := state == "" || state == "any" || state == "dropped" res := &TargetDiscovery{} + builder := labels.NewBuilder(labels.EmptyLabels()) if showActive { targetsActive := api.targetRetriever(r.Context()).TargetsActive() activeKeys, numTargets := sortKeys(targetsActive) res.ActiveTargets = make([]*Target, 0, numTargets) - builder := labels.NewScratchBuilder(0) for _, key := range activeKeys { if scrapePool != "" && key != scrapePool { @@ -1106,8 +1106,8 @@ func (api *API) targets(r *http.Request) apiFuncResult { globalURL, err := getGlobalURL(target.URL(), api.globalURLOptions) res.ActiveTargets = append(res.ActiveTargets, &Target{ - DiscoveredLabels: target.DiscoveredLabels(), - Labels: target.Labels(&builder), + DiscoveredLabels: target.DiscoveredLabels(builder), + Labels: target.Labels(builder), ScrapePool: key, ScrapeURL: target.URL().String(), GlobalURL: globalURL.String(), @@ -1145,7 +1145,7 @@ func (api *API) targets(r *http.Request) apiFuncResult { } for _, target := range targetsDropped[key] { res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{ - DiscoveredLabels: target.DiscoveredLabels(), + DiscoveredLabels: target.DiscoveredLabels(builder), }) } } @@ -1183,7 +1183,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } } - builder := labels.NewScratchBuilder(0) + builder := labels.NewBuilder(labels.EmptyLabels()) metric := r.FormValue("metric") res := []metricMetadata{} for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { @@ -1191,7 +1191,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { if limit >= 0 && len(res) >= limit { break } - targetLabels := t.Labels(&builder) + targetLabels := t.Labels(builder) // Filter targets that don't satisfy the label matchers. 
if matchTarget != "" && !matchLabels(targetLabels, matchers) { continue diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go deleted file mode 100644 index 41b2c3fc038..00000000000 --- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package tagencoding contains the tag encoding -// used interally by the stats collector. -package tagencoding // import "go.opencensus.io/internal/tagencoding" - -// Values represent the encoded buffer for the values. -type Values struct { - Buffer []byte - WriteIndex int - ReadIndex int -} - -func (vb *Values) growIfRequired(expected int) { - if len(vb.Buffer)-vb.WriteIndex < expected { - tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) - copy(tmp, vb.Buffer) - vb.Buffer = tmp - } -} - -// WriteValue is the helper method to encode Values from map[Key][]byte. -func (vb *Values) WriteValue(v []byte) { - length := len(v) & 0xff - vb.growIfRequired(1 + length) - - // writing length of v - vb.Buffer[vb.WriteIndex] = byte(length) - vb.WriteIndex++ - - if length == 0 { - // No value was encoded for this key - return - } - - // writing v - copy(vb.Buffer[vb.WriteIndex:], v[:length]) - vb.WriteIndex += length -} - -// ReadValue is the helper method to decode Values to a map[Key][]byte. -func (vb *Values) ReadValue() []byte { - // read length of v - length := int(vb.Buffer[vb.ReadIndex]) - vb.ReadIndex++ - if length == 0 { - // No value was encoded for this key - return nil - } - - // read value of v - v := make([]byte, length) - endIdx := vb.ReadIndex + length - copy(v, vb.Buffer[vb.ReadIndex:endIdx]) - vb.ReadIndex = endIdx - return v -} - -// Bytes returns a reference to already written bytes in the Buffer. -func (vb *Values) Bytes() []byte { - return vb.Buffer[:vb.WriteIndex] -} diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go deleted file mode 100644 index 52a7b3bf850..00000000000 --- a/vendor/go.opencensus.io/metric/metricdata/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metricdata contains the metrics data model. -// -// This is an EXPERIMENTAL package, and may change in arbitrary ways without -// notice. 
-package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go deleted file mode 100644 index 12695ce2dc7..00000000000 --- a/vendor/go.opencensus.io/metric/metricdata/exemplar.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" -) - -// Exemplars keys. -const ( - AttachmentKeySpanContext = "SpanContext" -) - -// Exemplar is an example data point associated with each bucket of a -// distribution type aggregation. -// -// Their purpose is to provide an example of the kind of thing -// (request, RPC, trace span, etc.) that resulted in that measurement. -type Exemplar struct { - Value float64 // the value that was recorded - Timestamp time.Time // the time the value was recorded - Attachments Attachments // attachments (if any) -} - -// Attachments is a map of extra values associated with a recorded data point. -type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go deleted file mode 100644 index aadae41e6a2..00000000000 --- a/vendor/go.opencensus.io/metric/metricdata/label.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -// LabelKey represents key of a label. It has optional -// description attribute. -type LabelKey struct { - Key string - Description string -} - -// LabelValue represents the value of a label. -// The zero value represents a missing label value, which may be treated -// differently to an empty string value by some back ends. -type LabelValue struct { - Value string // string value of the label - Present bool // flag that indicated whether a value is present or not -} - -// NewLabelValue creates a new non-nil LabelValue that represents the given string. 
-func NewLabelValue(val string) LabelValue { - return LabelValue{Value: val, Present: true} -} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go deleted file mode 100644 index 8293712c77f..00000000000 --- a/vendor/go.opencensus.io/metric/metricdata/metric.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" - - "go.opencensus.io/resource" -) - -// Descriptor holds metadata about a metric. -type Descriptor struct { - Name string // full name of the metric - Description string // human-readable description - Unit Unit // units for the measure - Type Type // type of measure - LabelKeys []LabelKey // label keys -} - -// Metric represents a quantity measured against a resource with different -// label value combinations. -type Metric struct { - Descriptor Descriptor // metric descriptor - Resource *resource.Resource // resource against which this was measured - TimeSeries []*TimeSeries // one time series for each combination of label values -} - -// TimeSeries is a sequence of points associated with a combination of label -// values. -type TimeSeries struct { - LabelValues []LabelValue // label values, same order as keys in the metric descriptor - Points []Point // points sequence - StartTime time.Time // time we started recording this time series -} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go deleted file mode 100644 index 7fe057b19cf..00000000000 --- a/vendor/go.opencensus.io/metric/metricdata/point.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -import ( - "time" -) - -// Point is a single data point of a time series. -type Point struct { - // Time is the point in time that this point represents in a time series. - Time time.Time - // Value is the value of this point. Prefer using ReadValue to switching on - // the value type, since new value types might be added. - Value interface{} -} - -//go:generate stringer -type ValueType - -// NewFloat64Point creates a new Point holding a float64 value. -func NewFloat64Point(t time.Time, val float64) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewInt64Point creates a new Point holding an int64 value. 
-func NewInt64Point(t time.Time, val int64) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewDistributionPoint creates a new Point holding a Distribution value. -func NewDistributionPoint(t time.Time, val *Distribution) Point { - return Point{ - Value: val, - Time: t, - } -} - -// NewSummaryPoint creates a new Point holding a Summary value. -func NewSummaryPoint(t time.Time, val *Summary) Point { - return Point{ - Value: val, - Time: t, - } -} - -// ValueVisitor allows reading the value of a point. -type ValueVisitor interface { - VisitFloat64Value(float64) - VisitInt64Value(int64) - VisitDistributionValue(*Distribution) - VisitSummaryValue(*Summary) -} - -// ReadValue accepts a ValueVisitor and calls the appropriate method with the -// value of this point. -// Consumers of Point should use this in preference to switching on the type -// of the value directly, since new value types may be added. -func (p Point) ReadValue(vv ValueVisitor) { - switch v := p.Value.(type) { - case int64: - vv.VisitInt64Value(v) - case float64: - vv.VisitFloat64Value(v) - case *Distribution: - vv.VisitDistributionValue(v) - case *Summary: - vv.VisitSummaryValue(v) - default: - panic("unexpected value type") - } -} - -// Distribution contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those -// values across a set of buckets. -type Distribution struct { - // Count is the number of values in the population. Must be non-negative. This value - // must equal the sum of the values in bucket_counts if a histogram is - // provided. - Count int64 - // Sum is the sum of the values in the population. If count is zero then this field - // must be zero. - Sum float64 - // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the - // population. For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If count is zero then this field must be zero. - SumOfSquaredDeviation float64 - // BucketOptions describes the bounds of the histogram buckets in this - // distribution. - // - // A Distribution may optionally contain a histogram of the values in the - // population. - // - // If nil, there is no associated histogram. - BucketOptions *BucketOptions - // Bucket If the distribution does not have a histogram, then omit this field. - // If there is a histogram, then the sum of the values in the Bucket counts - // must equal the value in the count field of the distribution. - Buckets []Bucket -} - -// BucketOptions describes the bounds of the histogram buckets in this -// distribution. -type BucketOptions struct { - // Bounds specifies a set of bucket upper bounds. - // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket - // index i are: - // - // [0, Bounds[i]) for i == 0 - // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 - // [Bounds[i-1], +infinity) for i == N-1 - Bounds []float64 -} - -// Bucket represents a single bucket (value range) in a distribution. -type Bucket struct { - // Count is the number of values in each bucket of the histogram, as described in - // bucket_bounds. - Count int64 - // Exemplar associated with this bucket (if any). - Exemplar *Exemplar -} - -// Summary is a representation of percentiles. -type Summary struct { - // Count is the cumulative count (if available). 
- Count int64 - // Sum is the cumulative sum of values (if available). - Sum float64 - // HasCountAndSum is true if Count and Sum are available. - HasCountAndSum bool - // Snapshot represents percentiles calculated over an arbitrary time window. - // The values in this struct can be reset at arbitrary unknown times, with - // the requirement that all of them are reset at the same time. - Snapshot Snapshot -} - -// Snapshot represents percentiles over an arbitrary time. -// The values in this struct can be reset at arbitrary unknown times, with -// the requirement that all of them are reset at the same time. -type Snapshot struct { - // Count is the number of values in the snapshot. Optional since some systems don't - // expose this. Set to 0 if not available. - Count int64 - // Sum is the sum of values in the snapshot. Optional since some systems don't - // expose this. If count is 0 then this field must be zero. - Sum float64 - // Percentiles is a map from percentile (range (0-100.0]) to the value of - // the percentile. - Percentiles map[float64]float64 -} - -//go:generate stringer -type Type - -// Type is the overall type of metric, including its value type and whether it -// represents a cumulative total (since the start time) or if it represents a -// gauge value. -type Type int - -// Metric types. -const ( - TypeGaugeInt64 Type = iota - TypeGaugeFloat64 - TypeGaugeDistribution - TypeCumulativeInt64 - TypeCumulativeFloat64 - TypeCumulativeDistribution - TypeSummary -) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go deleted file mode 100644 index c3f8ec27b53..00000000000 --- a/vendor/go.opencensus.io/metric/metricdata/type_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type Type"; DO NOT EDIT. - -package metricdata - -import "strconv" - -const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" - -var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go deleted file mode 100644 index b483a1371b0..00000000000 --- a/vendor/go.opencensus.io/metric/metricdata/unit.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricdata - -// Unit is a string encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -type Unit string - -// Predefined units. To record against a unit not represented here, create your -// own Unit type constant from a string. 
-const ( - UnitDimensionless Unit = "1" - UnitBytes Unit = "By" - UnitMilliseconds Unit = "ms" -) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go deleted file mode 100644 index ca1f3904938..00000000000 --- a/vendor/go.opencensus.io/metric/metricproducer/manager.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metricproducer - -import ( - "sync" -) - -// Manager maintains a list of active producers. Producers can register -// with the manager to allow readers to read all metrics provided by them. -// Readers can retrieve all producers registered with the manager, -// read metrics from the producers and export them. -type Manager struct { - mu sync.RWMutex - producers map[Producer]struct{} -} - -var prodMgr *Manager -var once sync.Once - -// GlobalManager is a single instance of producer manager -// that is used by all producers and all readers. -func GlobalManager() *Manager { - once.Do(func() { - prodMgr = &Manager{} - prodMgr.producers = make(map[Producer]struct{}) - }) - return prodMgr -} - -// AddProducer adds the producer to the Manager if it is not already present. -func (pm *Manager) AddProducer(producer Producer) { - if producer == nil { - return - } - pm.mu.Lock() - defer pm.mu.Unlock() - pm.producers[producer] = struct{}{} -} - -// DeleteProducer deletes the producer from the Manager if it is present. -func (pm *Manager) DeleteProducer(producer Producer) { - if producer == nil { - return - } - pm.mu.Lock() - defer pm.mu.Unlock() - delete(pm.producers, producer) -} - -// GetAll returns a slice of all producer currently registered with -// the Manager. For each call it generates a new slice. The slice -// should not be cached as registration may change at any time. It is -// typically called periodically by exporter to read metrics from -// the producers. -func (pm *Manager) GetAll() []Producer { - pm.mu.Lock() - defer pm.mu.Unlock() - producers := make([]Producer, len(pm.producers)) - i := 0 - for producer := range pm.producers { - producers[i] = producer - i++ - } - return producers -} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go deleted file mode 100644 index 6cee9ed1783..00000000000 --- a/vendor/go.opencensus.io/metric/metricproducer/producer.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package metricproducer - -import ( - "go.opencensus.io/metric/metricdata" -) - -// Producer is a source of metrics. -type Producer interface { - // Read should return the current values of all metrics supported by this - // metric provider. - // The returned metrics should be unique for each combination of name and - // resource. - Read() []*metricdata.Metric -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client.go b/vendor/go.opencensus.io/plugin/ocgrpc/client.go deleted file mode 100644 index 2063b6f76a1..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - - "go.opencensus.io/trace" - "google.golang.org/grpc/stats" -) - -// ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and -// traces. Use with gRPC clients only. -type ClientHandler struct { - // StartOptions allows configuring the StartOptions used to create new spans. - // - // StartOptions.SpanKind will always be set to trace.SpanKindClient - // for spans started by this handler. - StartOptions trace.StartOptions -} - -// HandleConn exists to satisfy gRPC stats.Handler. -func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. -func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. -func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = c.traceTagRPC(ctx, rti) - ctx = c.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go deleted file mode 100644 index fb3c19d6b64..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures are recorded by ClientHandler: -var ( - ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) - ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) - ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) - ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) - ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) - ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) -) - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. -var ( - ClientSentBytesPerRPCView = &view.View{ - Measure: ClientSentBytesPerRPC, - Name: "grpc.io/client/sent_bytes_per_rpc", - Description: "Distribution of bytes sent per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientReceivedBytesPerRPCView = &view.View{ - Measure: ClientReceivedBytesPerRPC, - Name: "grpc.io/client/received_bytes_per_rpc", - Description: "Distribution of bytes received per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultBytesDistribution, - } - - ClientRoundtripLatencyView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/roundtrip_latency", - Description: "Distribution of round-trip latency, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } - - // Purposely reuses the count from `ClientRoundtripLatency`, tagging - // with method and status to result in ClientCompletedRpcs. 
- ClientCompletedRPCsView = &view.View{ - Measure: ClientRoundtripLatency, - Name: "grpc.io/client/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - Aggregation: view.Count(), - } - - ClientStartedRPCsView = &view.View{ - Measure: ClientStartedRPCs, - Name: "grpc.io/client/started_rpcs", - Description: "Number of started client RPCs.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: view.Count(), - } - - ClientSentMessagesPerRPCView = &view.View{ - Measure: ClientSentMessagesPerRPC, - Name: "grpc.io/client/sent_messages_per_rpc", - Description: "Distribution of sent messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientReceivedMessagesPerRPCView = &view.View{ - Measure: ClientReceivedMessagesPerRPC, - Name: "grpc.io/client/received_messages_per_rpc", - Description: "Distribution of received messages count per RPC, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMessageCountDistribution, - } - - ClientServerLatencyView = &view.View{ - Measure: ClientServerLatency, - Name: "grpc.io/client/server_latency", - Description: "Distribution of server latency as viewed by client, by method.", - TagKeys: []tag.Key{KeyClientMethod}, - Aggregation: DefaultMillisecondsDistribution, - } -) - -// DefaultClientViews are the default client views provided by this package. -var DefaultClientViews = []*view.View{ - ClientSentBytesPerRPCView, - ClientReceivedBytesPerRPCView, - ClientRoundtripLatencyView, - ClientCompletedRPCsView, -} - -// TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go deleted file mode 100644 index b36349820d9..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "context" - "time" - - "go.opencensus.io/tag" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/stats" -) - -// statsTagRPC gets the tag.Map populated by the application code, serializes -// its tags into the GRPC metadata in order to be sent to the server. 
-func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { - startTime := time.Now() - if info == nil { - if grpclog.V(2) { - grpclog.Info("clientHandler.TagRPC called with nil info.") - } - return ctx - } - - d := &rpcData{ - startTime: startTime, - method: info.FullMethodName, - } - ts := tag.FromContext(ctx) - if ts != nil { - encoded := tag.Encode(ts) - ctx = stats.SetTags(ctx, encoded) - } - - return context.WithValue(ctx, rpcDataKey, d) -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go b/vendor/go.opencensus.io/plugin/ocgrpc/doc.go deleted file mode 100644 index 1370323fb71..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ocgrpc contains OpenCensus stats and trace -// integrations for gRPC. -// -// Use ServerHandler for servers and ClientHandler for clients. -package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server.go b/vendor/go.opencensus.io/plugin/ocgrpc/server.go deleted file mode 100644 index 8a53e097274..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - - "google.golang.org/grpc/stats" - - "go.opencensus.io/trace" -) - -// ServerHandler implements gRPC stats.Handler recording OpenCensus stats and -// traces. Use with gRPC servers. -// -// When installed (see Example), tracing metadata is read from inbound RPCs -// by default. If no tracing metadata is present, or if the tracing metadata is -// present but the SpanContext isn't sampled, then a new trace may be started -// (as determined by Sampler). -type ServerHandler struct { - // IsPublicEndpoint may be set to true to always start a new trace around - // each RPC. Any SpanContext in the RPC metadata will be added as a linked - // span instead of making it the parent of the span created around the - // server RPC. - // - // Be aware that if you leave this false (the default) on a public-facing - // server, callers will be able to send tracing metadata in gRPC headers - // and trigger traces in your backend. - IsPublicEndpoint bool - - // StartOptions to use for to spans started around RPCs handled by this server. 
- // - // These will apply even if there is tracing metadata already - // present on the inbound RPC but the SpanContext is not sampled. This - // ensures that each service has some opportunity to be traced. If you would - // like to not add any additional traces for this gRPC service, set: - // - // StartOptions.Sampler = trace.ProbabilitySampler(0.0) - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this handler. - StartOptions trace.StartOptions -} - -var _ stats.Handler = (*ServerHandler)(nil) - -// HandleConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { - // no-op -} - -// TagConn exists to satisfy gRPC stats.Handler. -func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - // no-op - return ctx -} - -// HandleRPC implements per-RPC tracing and stats instrumentation. -func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - traceHandleRPC(ctx, rs) - statsHandleRPC(ctx, rs) -} - -// TagRPC implements per-RPC context management. -func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - ctx = s.traceTagRPC(ctx, rti) - ctx = s.statsTagRPC(ctx, rti) - return ctx -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go deleted file mode 100644 index fe0e971086e..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ocgrpc - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// The following variables are measures are recorded by ServerHandler: -var ( - ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) - ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) - ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) - ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) - ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) -) - -// TODO(acetechnologist): This is temporary and will need to be replaced by a -// mechanism to load these defaults from a common repository/config shared by -// all supported languages. Likely a serialized protobuf of these defaults. - -// Predefined views may be registered to collect data for the above measures. -// As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are registered by -// default. -var ( - ServerReceivedBytesPerRPCView = &view.View{ - Name: "grpc.io/server/received_bytes_per_rpc", - Description: "Distribution of received bytes per RPC, by method.", - Measure: ServerReceivedBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerSentBytesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_bytes_per_rpc", - Description: "Distribution of total sent bytes per RPC, by method.", - Measure: ServerSentBytesPerRPC, - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: DefaultBytesDistribution, - } - - ServerLatencyView = &view.View{ - Name: "grpc.io/server/server_latency", - Description: "Distribution of server latency in milliseconds, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerLatency, - Aggregation: DefaultMillisecondsDistribution, - } - - // Purposely reuses the count from `ServerLatency`, tagging - // with method and status to result in ServerCompletedRpcs. - ServerCompletedRPCsView = &view.View{ - Name: "grpc.io/server/completed_rpcs", - Description: "Count of RPCs by method and status.", - TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, - Measure: ServerLatency, - Aggregation: view.Count(), - } - - ServerStartedRPCsView = &view.View{ - Measure: ServerStartedRPCs, - Name: "grpc.io/server/started_rpcs", - Description: "Number of started server RPCs.", - TagKeys: []tag.Key{KeyServerMethod}, - Aggregation: view.Count(), - } - - ServerReceivedMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/received_messages_per_rpc", - Description: "Distribution of messages received count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerReceivedMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } - - ServerSentMessagesPerRPCView = &view.View{ - Name: "grpc.io/server/sent_messages_per_rpc", - Description: "Distribution of messages sent count per RPC, by method.", - TagKeys: []tag.Key{KeyServerMethod}, - Measure: ServerSentMessagesPerRPC, - Aggregation: DefaultMessageCountDistribution, - } -) - -// DefaultServerViews are the default server views provided by this package. 
-var DefaultServerViews = []*view.View{
-	ServerReceivedBytesPerRPCView,
-	ServerSentBytesPerRPCView,
-	ServerLatencyView,
-	ServerCompletedRPCsView,
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
deleted file mode 100644
index afcef023afb..00000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package ocgrpc
-
-import (
-	"time"
-
-	"context"
-
-	"go.opencensus.io/tag"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/stats"
-)
-
-// statsTagRPC gets the metadata from the gRPC context, extracts the encoded
-// tags from it, creates a new tag.Map, and puts it into the returned context.
-func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
-	startTime := time.Now()
-	if info == nil {
-		if grpclog.V(2) {
-			grpclog.Infof("opencensus: TagRPC called with nil info.")
-		}
-		return ctx
-	}
-	d := &rpcData{
-		startTime: startTime,
-		method:    info.FullMethodName,
-	}
-	propagated := h.extractPropagatedTags(ctx)
-	ctx = tag.NewContext(ctx, propagated)
-	ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName)))
-	return context.WithValue(ctx, rpcDataKey, d)
-}
-
-// extractPropagatedTags creates a new tag map containing the tags extracted from the
-// gRPC metadata.
-func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map {
-	buf := stats.Tags(ctx)
-	if buf == nil {
-		return nil
-	}
-	propagated, err := tag.Decode(buf)
-	if err != nil {
-		if grpclog.V(2) {
-			grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata: %v", err)
-		}
-		return nil
-	}
-	return propagated
-}
diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
deleted file mode 100644
index 9cb27320ca1..00000000000
--- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
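None of the views deleted above were registered by default, so the measures recorded by ServerHandler produced no data until a server opted in explicitly. A minimal sketch of the usual wiring this removal retires, assuming a plain gRPC server (listen address illustrative):

package main

import (
	"log"
	"net"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	// Views aggregate the measures recorded by ServerHandler; nothing is
	// collected until they are registered.
	if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
		log.Fatal(err)
	}
	lis, err := net.Listen("tcp", "localhost:9000") // address illustrative
	if err != nil {
		log.Fatal(err)
	}
	// TagRPC/HandleRPC on the handler run for every RPC served here.
	srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
	log.Fatal(srv.Serve(lis))
}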
-//
-
-package ocgrpc
-
-import (
-	"context"
-	"strconv"
-	"strings"
-	"sync/atomic"
-	"time"
-
-	"go.opencensus.io/metric/metricdata"
-	ocstats "go.opencensus.io/stats"
-	"go.opencensus.io/stats/view"
-	"go.opencensus.io/tag"
-	"go.opencensus.io/trace"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/stats"
-	"google.golang.org/grpc/status"
-)
-
-type grpcInstrumentationKey string
-
-// rpcData holds the instrumentation RPC data that is needed between the start
-// and end of a call. It holds the info that this package needs to keep track
-// of between the various gRPC events.
-type rpcData struct {
-	// sentCount, sentBytes, recvCount and recvBytes have to be the first
-	// words in order to be 64-bit aligned on 32-bit architectures.
-	sentCount, sentBytes, recvCount, recvBytes int64 // access atomically
-
-	// startTime represents the time at which TagRPC was invoked at the
-	// beginning of an RPC. It is an approximation of the time when the
-	// application code invoked gRPC code.
-	startTime time.Time
-	method    string
-}
-
-// The following variables define the default hard-coded auxiliary data used by
-// both the default GRPC client and GRPC server metrics.
-var (
-	DefaultBytesDistribution        = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296)
-	DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000)
-	DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536)
-)
-
-// Server tags are applied to the context used to process each RPC, as well as
-// the measures at the end of each RPC.
-var (
-	KeyServerMethod = tag.MustNewKey("grpc_server_method")
-	KeyServerStatus = tag.MustNewKey("grpc_server_status")
-)
-
-// Client tags are applied to measures at the end of each RPC.
-var (
-	KeyClientMethod = tag.MustNewKey("grpc_client_method")
-	KeyClientStatus = tag.MustNewKey("grpc_client_status")
-)
-
-var (
-	rpcDataKey = grpcInstrumentationKey("opencensus-rpcData")
-)
-
-func methodName(fullname string) string {
-	return strings.TrimLeft(fullname, "/")
-}
-
-// statsHandleRPC processes the RPC events.
-func statsHandleRPC(ctx context.Context, s stats.RPCStats) { - switch st := s.(type) { - case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: - // do nothing for client - case *stats.Begin: - handleRPCBegin(ctx, st) - case *stats.OutPayload: - handleRPCOutPayload(ctx, st) - case *stats.InPayload: - handleRPCInPayload(ctx, st) - case *stats.End: - handleRPCEnd(ctx, st) - default: - grpclog.Infof("unexpected stats: %T", st) - } -} - -func handleRPCBegin(ctx context.Context, s *stats.Begin) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - } - - if s.IsClient() { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), - ocstats.WithMeasurements(ClientStartedRPCs.M(1))) - } else { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), - ocstats.WithMeasurements(ServerStartedRPCs.M(1))) - } -} - -func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.sentBytes, int64(s.Length)) - atomic.AddInt64(&d.sentCount, 1) -} - -func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - atomic.AddInt64(&d.recvBytes, int64(s.Length)) - atomic.AddInt64(&d.recvCount, 1) -} - -func handleRPCEnd(ctx context.Context, s *stats.End) { - d, ok := ctx.Value(rpcDataKey).(*rpcData) - if !ok { - if grpclog.V(2) { - grpclog.Infoln("Failed to retrieve *rpcData from context.") - } - return - } - - elapsedTime := time.Since(d.startTime) - - var st string - if s.Error != nil { - s, ok := status.FromError(s.Error) - if ok { - st = statusCodeToString(s) - } - } else { - st = "OK" - } - - latencyMillis := float64(elapsedTime) / float64(time.Millisecond) - attachments := getSpanCtxAttachment(ctx) - if s.Client { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyClientMethod, methodName(d.method)), - tag.Upsert(KeyClientStatus, st)), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ClientRoundtripLatency.M(latencyMillis))) - } else { - ocstats.RecordWithOptions(ctx, - ocstats.WithTags( - tag.Upsert(KeyServerStatus, st), - ), - ocstats.WithAttachments(attachments), - ocstats.WithMeasurements( - ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ServerLatency.M(latencyMillis))) - } -} - -func statusCodeToString(s *status.Status) string { - // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md - switch c := s.Code(); c { - case codes.OK: - return "OK" - case codes.Canceled: - return "CANCELLED" - case codes.Unknown: - return "UNKNOWN" - case codes.InvalidArgument: - return "INVALID_ARGUMENT" - case codes.DeadlineExceeded: - return "DEADLINE_EXCEEDED" - case codes.NotFound: - 
return "NOT_FOUND" - case codes.AlreadyExists: - return "ALREADY_EXISTS" - case codes.PermissionDenied: - return "PERMISSION_DENIED" - case codes.ResourceExhausted: - return "RESOURCE_EXHAUSTED" - case codes.FailedPrecondition: - return "FAILED_PRECONDITION" - case codes.Aborted: - return "ABORTED" - case codes.OutOfRange: - return "OUT_OF_RANGE" - case codes.Unimplemented: - return "UNIMPLEMENTED" - case codes.Internal: - return "INTERNAL" - case codes.Unavailable: - return "UNAVAILABLE" - case codes.DataLoss: - return "DATA_LOSS" - case codes.Unauthenticated: - return "UNAUTHENTICATED" - default: - return "CODE_" + strconv.FormatInt(int64(c), 10) - } -} - -func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { - attachments := map[string]interface{}{} - span := trace.FromContext(ctx) - if span == nil { - return attachments - } - spanCtx := span.SpanContext() - if spanCtx.IsSampled() { - attachments[metricdata.AttachmentKeySpanContext] = spanCtx - } - return attachments -} diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go deleted file mode 100644 index 61bc543d0a2..00000000000 --- a/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocgrpc - -import ( - "context" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/stats" - "google.golang.org/grpc/status" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const traceContextKey = "grpc-trace-bin" - -// TagRPC creates a new trace span for the client side of the RPC. -// -// It returns ctx with the new trace span added and a serialization of the -// SpanContext added to the outgoing gRPC metadata. -func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - ctx, span := trace.StartSpan(ctx, name, - trace.WithSampler(c.StartOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC - traceContextBinary := propagation.Binary(span.SpanContext()) - return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) -} - -// TagRPC creates a new trace span for the server side of the RPC. -// -// It checks the incoming gRPC metadata in ctx for a SpanContext, and if -// it finds one, uses that SpanContext as the parent context of the new span. -// -// It returns ctx, with the new trace span added. 
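The taggers in this file carry the SpanContext across the wire as the binary grpc-trace-bin metadata value: the client serializes it with propagation.Binary, and the server reconstructs it with propagation.FromBinary. A minimal sketch of that encode/decode round trip using the same helpers (span name illustrative):

package main

import (
	"context"
	"fmt"

	"go.opencensus.io/trace"
	"go.opencensus.io/trace/propagation"
)

func main() {
	// Encode a SpanContext the way the client tagger does, then decode it
	// the way the server tagger does on the receiving side.
	_, span := trace.StartSpan(context.Background(), "example.Method") // name illustrative
	bin := propagation.Binary(span.SpanContext())
	if parent, ok := propagation.FromBinary(bin); ok {
		fmt.Println(parent.TraceID, parent.SpanID)
	}
	span.End()
}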
-func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { - md, _ := metadata.FromIncomingContext(ctx) - name := strings.TrimPrefix(rti.FullMethodName, "/") - name = strings.Replace(name, "/", ".", -1) - traceContext := md[traceContextKey] - var ( - parent trace.SpanContext - haveParent bool - ) - if len(traceContext) > 0 { - // Metadata with keys ending in -bin are actually binary. They are base64 - // encoded before being put on the wire, see: - // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata - traceContextBinary := []byte(traceContext[0]) - parent, haveParent = propagation.FromBinary(traceContextBinary) - if haveParent && !s.IsPublicEndpoint { - ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler), - ) - return ctx - } - } - ctx, span := trace.StartSpan(ctx, name, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithSampler(s.StartOptions.Sampler)) - if haveParent { - span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) - } - return ctx -} - -func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { - span := trace.FromContext(ctx) - // TODO: compressed and uncompressed sizes are not populated in every message. - switch rs := rs.(type) { - case *stats.Begin: - span.AddAttributes( - trace.BoolAttribute("Client", rs.Client), - trace.BoolAttribute("FailFast", rs.FailFast)) - case *stats.InPayload: - span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) - case *stats.OutPayload: - span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) - case *stats.End: - if rs.Error != nil { - s, ok := status.FromError(rs.Error) - if ok { - span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) - } else { - span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) - } - } - span.End() - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go deleted file mode 100644 index da815b2a734..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "net/http" - "net/http/httptrace" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Transport is an http.RoundTripper that instruments all outgoing requests with -// OpenCensus stats and tracing. -// -// The zero value is intended to be a useful default, but for -// now it's recommended that you explicitly set Propagation, since the default -// for this may change. -type Transport struct { - // Base may be set to wrap another http.RoundTripper that does the actual - // requests. By default http.DefaultTransport is used. 
-	//
-	// If the base HTTP round tripper implements CancelRequest,
-	// the returned round tripper will be cancelable.
-	Base http.RoundTripper
-
-	// Propagation defines how traces are propagated. If unspecified, a default
-	// (currently B3 format) will be used.
-	Propagation propagation.HTTPFormat
-
-	// StartOptions are applied to the span started by this Transport around each
-	// request.
-	//
-	// StartOptions.SpanKind will always be set to trace.SpanKindClient
-	// for spans started by this transport.
-	StartOptions trace.StartOptions
-
-	// GetStartOptions allows setting start options per request. If set,
-	// StartOptions is ignored.
-	GetStartOptions func(*http.Request) trace.StartOptions
-
-	// FormatSpanName holds the function to use for generating the span name
-	// from the information found in the outgoing HTTP Request. By default the
-	// name equals the URL Path.
-	FormatSpanName func(*http.Request) string
-
-	// NewClientTrace may be set to a function allowing the current *trace.Span
-	// to be annotated with HTTP request event information emitted by the
-	// httptrace package.
-	NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
-
-	// TODO: Implement tag propagation for HTTP.
-}
-
-// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
-	rt := t.base()
-	if isHealthEndpoint(req.URL.Path) {
-		return rt.RoundTrip(req)
-	}
-	// TODO: remove excessive nesting of http.RoundTrippers here.
-	format := t.Propagation
-	if format == nil {
-		format = defaultFormat
-	}
-	spanNameFormatter := t.FormatSpanName
-	if spanNameFormatter == nil {
-		spanNameFormatter = spanNameFromURL
-	}
-
-	startOpts := t.StartOptions
-	if t.GetStartOptions != nil {
-		startOpts = t.GetStartOptions(req)
-	}
-
-	rt = &traceTransport{
-		base:   rt,
-		format: format,
-		startOptions: trace.StartOptions{
-			Sampler:  startOpts.Sampler,
-			SpanKind: trace.SpanKindClient,
-		},
-		formatSpanName: spanNameFormatter,
-		newClientTrace: t.NewClientTrace,
-	}
-	rt = statsTransport{base: rt}
-	return rt.RoundTrip(req)
-}
-
-func (t *Transport) base() http.RoundTripper {
-	if t.Base != nil {
-		return t.Base
-	}
-	return http.DefaultTransport
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *Transport) CancelRequest(req *http.Request) {
-	type canceler interface {
-		CancelRequest(*http.Request)
-	}
-	if cr, ok := t.base().(canceler); ok {
-		cr.CancelRequest(req)
-	}
-}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
deleted file mode 100644
index 17142aabe00..00000000000
--- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2018, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. -type statsTransport struct { - base http.RoundTripper -} - -// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. -func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { - ctx, _ := tag.New(req.Context(), - tag.Upsert(KeyClientHost, req.Host), - tag.Upsert(Host, req.Host), - tag.Upsert(KeyClientPath, req.URL.Path), - tag.Upsert(Path, req.URL.Path), - tag.Upsert(KeyClientMethod, req.Method), - tag.Upsert(Method, req.Method)) - req = req.WithContext(ctx) - track := &tracker{ - start: time.Now(), - ctx: ctx, - } - if req.Body == nil { - // TODO: Handle cases where ContentLength is not set. - track.reqSize = -1 - } else if req.ContentLength > 0 { - track.reqSize = req.ContentLength - } - stats.Record(ctx, ClientRequestCount.M(1)) - - // Perform request. - resp, err := t.base.RoundTrip(req) - - if err != nil { - track.statusCode = http.StatusInternalServerError - track.end() - } else { - track.statusCode = resp.StatusCode - if req.Method != "HEAD" { - track.respContentLength = resp.ContentLength - } - if resp.Body == nil { - track.end() - } else { - track.body = resp.Body - resp.Body = wrappedBody(track, resp.Body) - } - } - return resp, err -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t statsTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -type tracker struct { - ctx context.Context - respSize int64 - respContentLength int64 - reqSize int64 - start time.Time - body io.ReadCloser - statusCode int - endOnce sync.Once -} - -var _ io.ReadCloser = (*tracker)(nil) - -func (t *tracker) end() { - t.endOnce.Do(func() { - latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) - respSize := t.respSize - if t.respSize == 0 && t.respContentLength > 0 { - respSize = t.respContentLength - } - m := []stats.Measurement{ - ClientSentBytes.M(t.reqSize), - ClientReceivedBytes.M(respSize), - ClientRoundtripLatency.M(latencyMs), - ClientLatency.M(latencyMs), - ClientResponseBytes.M(t.respSize), - } - if t.reqSize >= 0 { - m = append(m, ClientRequestBytes.M(t.reqSize)) - } - - stats.RecordWithTags(t.ctx, []tag.Mutator{ - tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), - tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), - }, m...) - }) -} - -func (t *tracker) Read(b []byte) (int, error) { - n, err := t.body.Read(b) - t.respSize += int64(n) - switch err { - case nil: - return n, nil - case io.EOF: - t.end() - } - return n, err -} - -func (t *tracker) Close() error { - // Invoking endSpan on Close will help catch the cases - // in which a read returned a non-nil error, we set the - // span status but didn't end the span. - t.end() - return t.body.Close() -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go deleted file mode 100644 index 10e626b16e6..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package ochttp provides OpenCensus instrumentation for net/http package. -// -// For server instrumentation, see Handler. For client-side instrumentation, -// see Transport. -package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go deleted file mode 100644 index 9ad8852198d..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package b3 contains a propagation.HTTPFormat implementation -// for B3 propagation. See https://github.com/openzipkin/b3-propagation -// for more details. -package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" - -import ( - "encoding/hex" - "net/http" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// B3 headers that OpenCensus understands. -const ( - TraceIDHeader = "X-B3-TraceId" - SpanIDHeader = "X-B3-SpanId" - SampledHeader = "X-B3-Sampled" -) - -// HTTPFormat implements propagation.HTTPFormat to propagate -// traces in HTTP headers in B3 propagation format. -// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers -// because there are additional fields not represented in the -// OpenCensus span context. Spans created from the incoming -// header will be the direct children of the client-side span. -// Similarly, receiver of the outgoing spans should use client-side -// span created by OpenCensus as the parent. -type HTTPFormat struct{} - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// SpanContextFromRequest extracts a B3 span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) - if !ok { - return trace.SpanContext{}, false - } - sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) - return trace.SpanContext{ - TraceID: tid, - SpanID: sid, - TraceOptions: sampled, - }, true -} - -// ParseTraceID parses the value of the X-B3-TraceId header. -func ParseTraceID(tid string) (trace.TraceID, bool) { - if tid == "" { - return trace.TraceID{}, false - } - b, err := hex.DecodeString(tid) - if err != nil || len(b) > 16 { - return trace.TraceID{}, false - } - var traceID trace.TraceID - if len(b) <= 8 { - // The lower 64-bits. 
- start := 8 + (8 - len(b)) - copy(traceID[start:], b) - } else { - start := 16 - len(b) - copy(traceID[start:], b) - } - - return traceID, true -} - -// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. -func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { - if sid == "" { - return trace.SpanID{}, false - } - b, err := hex.DecodeString(sid) - if err != nil || len(b) > 8 { - return trace.SpanID{}, false - } - start := 8 - len(b) - copy(spanID[start:], b) - return spanID, true -} - -// ParseSampled parses the value of the X-B3-Sampled header. -func ParseSampled(sampled string) (trace.TraceOptions, bool) { - switch sampled { - case "true", "1": - return trace.TraceOptions(1), true - default: - return trace.TraceOptions(0), false - } -} - -// SpanContextToRequest modifies the given request to include B3 headers. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) - req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) - - var sampled string - if sc.IsSampled() { - sampled = "1" - } else { - sampled = "0" - } - req.Header.Set(SampledHeader, sampled) -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go deleted file mode 100644 index 5e6a3430760..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/route.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "net/http" - - "go.opencensus.io/tag" -) - -// SetRoute sets the http_server_route tag to the given value. -// It's useful when an HTTP framework does not support the http.Handler interface -// and using WithRouteTag is not an option, but provides a way to hook into the request flow. -func SetRoute(ctx context.Context, route string) { - if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) - } -} - -// WithRouteTag returns an http.Handler that records stats with the -// http_server_route tag set to the given value. -func WithRouteTag(handler http.Handler, route string) http.Handler { - return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { - addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} - ctx, _ := tag.New(r.Context(), addRoute...) - r = r.WithContext(ctx) - handler.ServeHTTP(w, r) - return addRoute - }) -} - -// taggedHandlerFunc is a http.Handler that returns tags describing the -// processing of the request. These tags will be recorded along with the -// measures in this package at the end of the request. -type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator - -func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { - tags := h(w, r) - if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { - a.t = append(a.t, tags...) 
- } -} - -type addedTagsKey struct{} - -type addedTags struct { - t []tag.Mutator -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go deleted file mode 100644 index f7c8434be06..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ /dev/null @@ -1,455 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "context" - "io" - "net/http" - "strconv" - "sync" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// Handler is an http.Handler wrapper to instrument your HTTP server with -// OpenCensus. It supports both stats and tracing. -// -// # Tracing -// -// This handler is aware of the incoming request's span, reading it from request -// headers as configured using the Propagation field. -// The extracted span can be accessed from the incoming request's -// context. -// -// span := trace.FromContext(r.Context()) -// -// The server span will be automatically ended at the end of ServeHTTP. -type Handler struct { - // Propagation defines how traces are propagated. If unspecified, - // B3 propagation will be used. - Propagation propagation.HTTPFormat - - // Handler is the handler used to handle the incoming request. - Handler http.Handler - - // StartOptions are applied to the span started by this Handler around each - // request. - // - // StartOptions.SpanKind will always be set to trace.SpanKindServer - // for spans started by this transport. - StartOptions trace.StartOptions - - // GetStartOptions allows to set start options per request. If set, - // StartOptions is going to be ignored. - GetStartOptions func(*http.Request) trace.StartOptions - - // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) - // servers. If true, any trace metadata set on the incoming request will - // be added as a linked trace instead of being added as a parent of the - // current trace. - IsPublicEndpoint bool - - // FormatSpanName holds the function to use for generating the span name - // from the information found in the incoming HTTP Request. By default the - // name equals the URL Path. - FormatSpanName func(*http.Request) string - - // IsHealthEndpoint holds the function to use for determining if the - // incoming HTTP request should be considered a health check. This is in - // addition to the private isHealthEndpoint func which may also indicate - // tracing should be skipped. 
- IsHealthEndpoint func(*http.Request) bool -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var tags addedTags - r, traceEnd := h.startTrace(w, r) - defer traceEnd() - w, statsEnd := h.startStats(w, r) - defer statsEnd(&tags) - handler := h.Handler - if handler == nil { - handler = http.DefaultServeMux - } - r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) - handler.ServeHTTP(w, r) -} - -func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { - return r, func() {} - } - var name string - if h.FormatSpanName == nil { - name = spanNameFromURL(r) - } else { - name = h.FormatSpanName(r) - } - ctx := r.Context() - - startOpts := h.StartOptions - if h.GetStartOptions != nil { - startOpts = h.GetStartOptions(r) - } - - var span *trace.Span - sc, ok := h.extractSpanContext(r) - if ok && !h.IsPublicEndpoint { - ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer)) - } else { - ctx, span = trace.StartSpan(ctx, name, - trace.WithSampler(startOpts.Sampler), - trace.WithSpanKind(trace.SpanKindServer), - ) - if ok { - span.AddLink(trace.Link{ - TraceID: sc.TraceID, - SpanID: sc.SpanID, - Type: trace.LinkTypeParent, - Attributes: nil, - }) - } - } - span.AddAttributes(requestAttrs(r)...) - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. - } else if r.ContentLength > 0 { - span.AddMessageReceiveEvent(0, /* TODO: messageID */ - r.ContentLength, -1) - } - return r.WithContext(ctx), span.End -} - -func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { - if h.Propagation == nil { - return defaultFormat.SpanContextFromRequest(r) - } - return h.Propagation.SpanContextFromRequest(r) -} - -func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { - ctx, _ := tag.New(r.Context(), - tag.Upsert(Host, r.Host), - tag.Upsert(Path, r.URL.Path), - tag.Upsert(Method, r.Method)) - track := &trackingResponseWriter{ - start: time.Now(), - ctx: ctx, - writer: w, - } - if r.Body == nil { - // TODO: Handle cases where ContentLength is not set. 
-		track.reqSize = -1
-	} else if r.ContentLength > 0 {
-		track.reqSize = r.ContentLength
-	}
-	stats.Record(ctx, ServerRequestCount.M(1))
-	return track.wrappedResponseWriter(), track.end
-}
-
-type trackingResponseWriter struct {
-	ctx        context.Context
-	reqSize    int64
-	respSize   int64
-	start      time.Time
-	statusCode int
-	statusLine string
-	endOnce    sync.Once
-	writer     http.ResponseWriter
-}
-
-// Compile-time assertion for the ResponseWriter interface.
-var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
-
-func (t *trackingResponseWriter) end(tags *addedTags) {
-	t.endOnce.Do(func() {
-		if t.statusCode == 0 {
-			t.statusCode = 200
-		}
-
-		span := trace.FromContext(t.ctx)
-		span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
-		span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode)))
-
-		m := []stats.Measurement{
-			ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
-			ServerResponseBytes.M(t.respSize),
-		}
-		if t.reqSize >= 0 {
-			m = append(m, ServerRequestBytes.M(t.reqSize))
-		}
-		allTags := make([]tag.Mutator, len(tags.t)+1)
-		allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
-		copy(allTags[1:], tags.t)
-		stats.RecordWithTags(t.ctx, allTags, m...)
-	})
-}
-
-func (t *trackingResponseWriter) Header() http.Header {
-	return t.writer.Header()
-}
-
-func (t *trackingResponseWriter) Write(data []byte) (int, error) {
-	n, err := t.writer.Write(data)
-	t.respSize += int64(n)
-	// Add a message event for the response bytes sent.
-	span := trace.FromContext(t.ctx)
-	span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1)
-	return n, err
-}
-
-func (t *trackingResponseWriter) WriteHeader(statusCode int) {
-	t.writer.WriteHeader(statusCode)
-	t.statusCode = statusCode
-	t.statusLine = http.StatusText(t.statusCode)
-}
-
-// wrappedResponseWriter returns a wrapped version of the original
-// ResponseWriter and only implements the same combination of additional
-// interfaces as the original.
-// This implementation is based on https://github.com/felixge/httpsnoop.
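The exhaustive switch that follows exists because interface wrapping in Go is lossy: a struct that embeds only http.ResponseWriter hides whichever optional interfaces (http.Hijacker, http.Flusher, http.Pusher, io.ReaderFrom) the underlying writer implemented, so every combination has to be re-exposed explicitly. A tiny sketch of the failure mode a naive wrapper would cause:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// naiveWriter wraps a ResponseWriter without re-exposing its optional
// interfaces, which the combination switch below is designed to avoid.
type naiveWriter struct{ http.ResponseWriter }

func main() {
	rec := httptest.NewRecorder() // ResponseRecorder implements http.Flusher
	var w http.ResponseWriter = naiveWriter{rec}
	_, ok := w.(http.Flusher)
	fmt.Println(ok) // false: the wrapper hid Flusher, so streaming handlers would break
}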
-func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { - var ( - hj, i0 = t.writer.(http.Hijacker) - cn, i1 = t.writer.(http.CloseNotifier) - pu, i2 = t.writer.(http.Pusher) - fl, i3 = t.writer.(http.Flusher) - rf, i4 = t.writer.(io.ReaderFrom) - ) - - switch { - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - }{t} - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - io.ReaderFrom - }{t, rf} - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - }{t, fl} - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{t, fl, rf} - case !i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - }{t, pu} - case !i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - io.ReaderFrom - }{t, pu, rf} - case !i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - }{t, pu, fl} - case !i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - io.ReaderFrom - }{t, pu, fl, rf} - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - }{t, cn} - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{t, cn, rf} - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - }{t, cn, fl} - case !i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, cn, fl, rf} - case !i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - }{t, cn, pu} - case !i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, cn, pu, rf} - case !i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - }{t, cn, pu, fl} - case !i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, cn, pu, fl, rf} - case i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - }{t, hj} - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{t, hj, rf} - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - }{t, hj, fl} - case i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - io.ReaderFrom - }{t, hj, fl, rf} - case i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - }{t, hj, pu} - case i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - io.ReaderFrom - }{t, hj, pu, rf} - case i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - }{t, hj, pu, fl} - case i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, pu, fl, rf} - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - }{t, hj, cn} - case i0 && i1 && !i2 && !i3 && i4: - return struct { - 
http.ResponseWriter - http.Hijacker - http.CloseNotifier - io.ReaderFrom - }{t, hj, cn, rf} - case i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - }{t, hj, cn, fl} - case i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{t, hj, cn, fl, rf} - case i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - }{t, hj, cn, pu} - case i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{t, hj, cn, pu, rf} - case i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - }{t, hj, cn, pu, fl} - case i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{t, hj, cn, pu, fl, rf} - default: - return struct { - http.ResponseWriter - }{t} - } -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go deleted file mode 100644 index 05c6c56cc79..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "crypto/tls" - "net/http" - "net/http/httptrace" - "strings" - - "go.opencensus.io/trace" -) - -type spanAnnotator struct { - sp *trace.Span -} - -// TODO: Remove NewSpanAnnotator at the next release. - -// NewSpanAnnotator returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. -// Deprecated: Use NewSpanAnnotatingClientTrace instead -func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { - return NewSpanAnnotatingClientTrace(r, s) -} - -// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates -// all emitted httptrace events on the provided Span. 
-func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { - sa := spanAnnotator{sp: s} - - return &httptrace.ClientTrace{ - GetConn: sa.getConn, - GotConn: sa.gotConn, - PutIdleConn: sa.putIdleConn, - GotFirstResponseByte: sa.gotFirstResponseByte, - Got100Continue: sa.got100Continue, - DNSStart: sa.dnsStart, - DNSDone: sa.dnsDone, - ConnectStart: sa.connectStart, - ConnectDone: sa.connectDone, - TLSHandshakeStart: sa.tlsHandshakeStart, - TLSHandshakeDone: sa.tlsHandshakeDone, - WroteHeaders: sa.wroteHeaders, - Wait100Continue: sa.wait100Continue, - WroteRequest: sa.wroteRequest, - } -} - -func (s spanAnnotator) getConn(hostPort string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.get_connection.host_port", hostPort), - } - s.sp.Annotate(attrs, "GetConn") -} - -func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { - attrs := []trace.Attribute{ - trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), - trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), - } - if info.WasIdle { - attrs = append(attrs, - trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) - } - s.sp.Annotate(attrs, "GotConn") -} - -// PutIdleConn implements a httptrace.ClientTrace hook -func (s spanAnnotator) putIdleConn(err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) - } - s.sp.Annotate(attrs, "PutIdleConn") -} - -func (s spanAnnotator) gotFirstResponseByte() { - s.sp.Annotate(nil, "GotFirstResponseByte") -} - -func (s spanAnnotator) got100Continue() { - s.sp.Annotate(nil, "Got100Continue") -} - -func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_start.host", info.Host), - } - s.sp.Annotate(attrs, "DNSStart") -} - -func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { - var addrs []string - for _, addr := range info.Addrs { - addrs = append(addrs, addr.String()) - } - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), - } - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "DNSDone") -} - -func (s spanAnnotator) connectStart(network, addr string) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_start.network", network), - trace.StringAttribute("httptrace.connect_start.addr", addr), - } - s.sp.Annotate(attrs, "ConnectStart") -} - -func (s spanAnnotator) connectDone(network, addr string, err error) { - attrs := []trace.Attribute{ - trace.StringAttribute("httptrace.connect_done.network", network), - trace.StringAttribute("httptrace.connect_done.addr", addr), - } - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.connect_done.error", err.Error())) - } - s.sp.Annotate(attrs, "ConnectDone") -} - -func (s spanAnnotator) tlsHandshakeStart() { - s.sp.Annotate(nil, "TLSHandshakeStart") -} - -func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { - var attrs []trace.Attribute - if err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) - } - s.sp.Annotate(attrs, "TLSHandshakeDone") -} - -func (s spanAnnotator) wroteHeaders() { - s.sp.Annotate(nil, "WroteHeaders") -} - -func (s spanAnnotator) wait100Continue() { - s.sp.Annotate(nil, 
"Wait100Continue") -} - -func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { - var attrs []trace.Attribute - if info.Err != nil { - attrs = append(attrs, - trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) - } - s.sp.Annotate(attrs, "WroteRequest") -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go deleted file mode 100644 index ee3729040dd..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -// Deprecated: client HTTP measures. -var ( - // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. - ClientRequestCount = stats.Int64( - "opencensus.io/http/client/request_count", - "Number of HTTP requests started", - stats.UnitDimensionless) - // Deprecated: Use ClientSentBytes. - ClientRequestBytes = stats.Int64( - "opencensus.io/http/client/request_bytes", - "HTTP request body size if set as ContentLength (uncompressed)", - stats.UnitBytes) - // Deprecated: Use ClientReceivedBytes. - ClientResponseBytes = stats.Int64( - "opencensus.io/http/client/response_bytes", - "HTTP response body size (uncompressed)", - stats.UnitBytes) - // Deprecated: Use ClientRoundtripLatency. - ClientLatency = stats.Float64( - "opencensus.io/http/client/latency", - "End-to-end latency", - stats.UnitMilliseconds) -) - -// The following client HTTP measures are supported for use in custom views. 
-var (
-	ClientSentBytes = stats.Int64(
-		"opencensus.io/http/client/sent_bytes",
-		"Total bytes sent in request body (not including headers)",
-		stats.UnitBytes,
-	)
-	ClientReceivedBytes = stats.Int64(
-		"opencensus.io/http/client/received_bytes",
-		"Total bytes received in response bodies (not including headers but including error responses with bodies)",
-		stats.UnitBytes,
-	)
-	ClientRoundtripLatency = stats.Float64(
-		"opencensus.io/http/client/roundtrip_latency",
-		"Time between first byte of request headers sent to last byte of response received, or terminal error",
-		stats.UnitMilliseconds,
-	)
-)
-
-// The following server HTTP measures are supported for use in custom views:
-var (
-	ServerRequestCount = stats.Int64(
-		"opencensus.io/http/server/request_count",
-		"Number of HTTP requests started",
-		stats.UnitDimensionless)
-	ServerRequestBytes = stats.Int64(
-		"opencensus.io/http/server/request_bytes",
-		"HTTP request body size if set as ContentLength (uncompressed)",
-		stats.UnitBytes)
-	ServerResponseBytes = stats.Int64(
-		"opencensus.io/http/server/response_bytes",
-		"HTTP response body size (uncompressed)",
-		stats.UnitBytes)
-	ServerLatency = stats.Float64(
-		"opencensus.io/http/server/latency",
-		"End-to-end latency",
-		stats.UnitMilliseconds)
-)
-
-// The following tags are applied to stats recorded by this package. Host, Path
-// and Method are applied to all measures. StatusCode is not applied to
-// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known.
-var (
-	// Host is the value of the HTTP Host header.
-	//
-	// The value of this tag can be controlled by the HTTP client, so you need
-	// to watch out for potentially generating high-cardinality labels in your
-	// metrics backend if you use this tag in views.
-	Host = tag.MustNewKey("http.host")
-
-	// StatusCode is the numeric HTTP response status code,
-	// or "error" if a transport error occurred and no status code was read.
-	StatusCode = tag.MustNewKey("http.status")
-
-	// Path is the URL path (not including query string) in the request.
-	//
-	// The value of this tag can be controlled by the HTTP client, so you need
-	// to watch out for potentially generating high-cardinality labels in your
-	// metrics backend if you use this tag in views.
-	Path = tag.MustNewKey("http.path")
-
-	// Method is the HTTP method of the request, capitalized (GET, POST, etc.).
-	Method = tag.MustNewKey("http.method")
-
-	// KeyServerRoute is a low-cardinality string representing the logical
-	// handler of the request. This is usually the pattern registered on a
-	// ServeMux (or a similar string).
-	KeyServerRoute = tag.MustNewKey("http_server_route")
-)
-
-// Client tag keys.
-var (
-	// KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.).
-	KeyClientMethod = tag.MustNewKey("http_client_method")
-	// KeyClientPath is the URL path (not including query string).
-	KeyClientPath = tag.MustNewKey("http_client_path")
-	// KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500), or "error" if no response status line was received.
-	KeyClientStatus = tag.MustNewKey("http_client_status")
-	// KeyClientHost is the value of the request Host header.
-	KeyClientHost = tag.MustNewKey("http_client_host")
-)
-
-// Default distributions used by views in this package.
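The cardinality warnings above are practical: Host and Path are client-controlled, so views keyed on them can explode a metrics backend. The safer pattern these deleted helpers supported is aggregating by the low-cardinality route tag. A sketch, assuming a plain net/http server (view name and route pattern are illustrative):

package main

import (
	"fmt"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func main() {
	// Key the view on KeyServerRoute instead of the client-controlled
	// http.host/http.path tag keys.
	err := view.Register(&view.View{
		Name:        "example.com/http/server/latency_by_route", // illustrative name
		Measure:     ochttp.ServerLatency,
		Aggregation: ochttp.DefaultLatencyDistribution,
		TagKeys:     []tag.Key{ochttp.KeyServerRoute},
	})
	if err != nil {
		log.Fatal(err)
	}
	mux := http.NewServeMux()
	mux.Handle("/users/", ochttp.WithRouteTag(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}), "/users/:id")) // route pattern illustrative
	log.Fatal(http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux}))
}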
-var ( - DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) -) - -// Package ochttp provides some convenience views for client measures. -// You still need to register these views for data to actually be collected. -var ( - ClientSentBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/sent_bytes", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientReceivedBytesDistribution = &view.View{ - Name: "opencensus.io/http/client/received_bytes", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientRoundtripLatencyDistribution = &view.View{ - Name: "opencensus.io/http/client/roundtrip_latency", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - Description: "End-to-end latency, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } - - ClientCompletedCount = &view.View{ - Name: "opencensus.io/http/client/completed_count", - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - Description: "Count of completed requests, by HTTP method and response status", - TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, - } -) - -// Deprecated: Old client Views. -var ( - // Deprecated: No direct replacement, but see ClientCompletedCount. - ClientRequestCountView = &view.View{ - Name: "opencensus.io/http/client/request_count", - Description: "Count of HTTP requests started", - Measure: ClientRequestCount, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientSentBytesDistribution. - ClientRequestBytesView = &view.View{ - Name: "opencensus.io/http/client/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ClientSentBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientReceivedBytesDistribution instead. - ClientResponseBytesView = &view.View{ - Name: "opencensus.io/http/client/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ClientReceivedBytes, - Aggregation: DefaultSizeDistribution, - } - - // Deprecated: Use ClientRoundtripLatencyDistribution instead. - ClientLatencyView = &view.View{ - Name: "opencensus.io/http/client/latency", - Description: "Latency distribution of HTTP requests", - Measure: ClientRoundtripLatency, - Aggregation: DefaultLatencyDistribution, - } - - // Deprecated: Use ClientCompletedCount instead. - ClientRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/client/request_count_by_method", - Description: "Client request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ClientSentBytes, - Aggregation: view.Count(), - } - - // Deprecated: Use ClientCompletedCount instead. 
- ClientResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/client/response_count_by_status_code", - Description: "Client response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - } -) - -// Package ochttp provides some convenience views for server measures. -// You still need to register these views for data to actually be collected. -var ( - ServerRequestCountView = &view.View{ - Name: "opencensus.io/http/server/request_count", - Description: "Count of HTTP requests started", - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerRequestBytesView = &view.View{ - Name: "opencensus.io/http/server/request_bytes", - Description: "Size distribution of HTTP request body", - Measure: ServerRequestBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerResponseBytesView = &view.View{ - Name: "opencensus.io/http/server/response_bytes", - Description: "Size distribution of HTTP response body", - Measure: ServerResponseBytes, - Aggregation: DefaultSizeDistribution, - } - - ServerLatencyView = &view.View{ - Name: "opencensus.io/http/server/latency", - Description: "Latency distribution of HTTP requests", - Measure: ServerLatency, - Aggregation: DefaultLatencyDistribution, - } - - ServerRequestCountByMethod = &view.View{ - Name: "opencensus.io/http/server/request_count_by_method", - Description: "Server request count by HTTP method", - TagKeys: []tag.Key{Method}, - Measure: ServerRequestCount, - Aggregation: view.Count(), - } - - ServerResponseCountByStatusCode = &view.View{ - Name: "opencensus.io/http/server/response_count_by_status_code", - Description: "Server response count by status code", - TagKeys: []tag.Key{StatusCode}, - Measure: ServerLatency, - Aggregation: view.Count(), - } -) - -// DefaultClientViews are the default client views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultClientViews = []*view.View{ - ClientRequestCountView, - ClientRequestBytesView, - ClientResponseBytesView, - ClientLatencyView, - ClientRequestCountByMethod, - ClientResponseCountByStatusCode, -} - -// DefaultServerViews are the default server views provided by this package. -// Deprecated: No replacement. Register the views you would like individually. -var DefaultServerViews = []*view.View{ - ServerRequestCountView, - ServerRequestBytesView, - ServerResponseBytesView, - ServerLatencyView, - ServerRequestCountByMethod, - ServerResponseCountByStatusCode, -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go deleted file mode 100644 index ed3a5db5611..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
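As the deprecation notes on DefaultClientViews and DefaultServerViews in stats.go above say, registering the whole bundles is discouraged in favor of registering only the views a service actually consumes, which also keeps metric volume down. A minimal sketch of that individual registration (the selection of views is illustrative):

package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register individual views instead of the deprecated Default*Views slices.
	if err := view.Register(
		ochttp.ClientCompletedCount,
		ochttp.ClientRoundtripLatencyDistribution,
		ochttp.ServerLatencyView,
	); err != nil {
		log.Fatal(err)
	}
}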
- -package ochttp - -import ( - "io" - "net/http" - "net/http/httptrace" - - "go.opencensus.io/plugin/ochttp/propagation/b3" - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -// TODO(jbd): Add godoc examples. - -var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} - -// Attributes recorded on the span for the requests. -// Only trace exporters will need them. -const ( - HostAttribute = "http.host" - MethodAttribute = "http.method" - PathAttribute = "http.path" - URLAttribute = "http.url" - UserAgentAttribute = "http.user_agent" - StatusCodeAttribute = "http.status_code" -) - -type traceTransport struct { - base http.RoundTripper - startOptions trace.StartOptions - format propagation.HTTPFormat - formatSpanName func(*http.Request) string - newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace -} - -// TODO(jbd): Add message events for request and response size. - -// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. -// The created span can follow a parent span, if a parent is present in -// the request's context. -func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { - name := t.formatSpanName(req) - // TODO(jbd): Discuss whether we want to prefix - // outgoing requests with Sent. - ctx, span := trace.StartSpan(req.Context(), name, - trace.WithSampler(t.startOptions.Sampler), - trace.WithSpanKind(trace.SpanKindClient)) - - if t.newClientTrace != nil { - req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) - } else { - req = req.WithContext(ctx) - } - - if t.format != nil { - // SpanContextToRequest will modify its Request argument, which is - // contrary to the contract for http.RoundTripper, so we need to - // pass it a copy of the Request. - // However, the Request struct itself was already copied by - // the WithContext calls above and so we just need to copy the header. - header := make(http.Header) - for k, v := range req.Header { - header[k] = v - } - req.Header = header - t.format.SpanContextToRequest(span.SpanContext(), req) - } - - span.AddAttributes(requestAttrs(req)...) - resp, err := t.base.RoundTrip(req) - if err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - span.End() - return resp, err - } - - span.AddAttributes(responseAttrs(resp)...) - span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) - - // span.End() will be invoked after - // a read from resp.Body returns io.EOF or when - // resp.Body.Close() is invoked. - bt := &bodyTracker{rc: resp.Body, span: span} - resp.Body = wrappedBody(bt, resp.Body) - return resp, err -} - -// bodyTracker wraps a response.Body and invokes -// span.End() on encountering io.EOF while reading -// the body of the original response. -type bodyTracker struct { - rc io.ReadCloser - span *trace.Span -} - -var _ io.ReadCloser = (*bodyTracker)(nil) - -func (bt *bodyTracker) Read(b []byte) (int, error) { - n, err := bt.rc.Read(b) - - switch err { - case nil: - return n, nil - case io.EOF: - bt.span.End() - default: - // For all other errors, set the span status - bt.span.SetStatus(trace.Status{ - // Code 2 is trace.StatusCodeUnknown. - Code: 2, - Message: err.Error(), - }) - } - return n, err -} - -func (bt *bodyTracker) Close() error { - // Ending the span on Close helps catch the cases - // in which a read returned a non-nil error and we set the - // span status but didn't end the span.
- bt.span.End() - return bt.rc.Close() -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *traceTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base.(canceler); ok { - cr.CancelRequest(req) - } -} - -func spanNameFromURL(req *http.Request) string { - return req.URL.Path -} - -func requestAttrs(r *http.Request) []trace.Attribute { - userAgent := r.UserAgent() - - attrs := make([]trace.Attribute, 0, 5) - attrs = append(attrs, - trace.StringAttribute(PathAttribute, r.URL.Path), - trace.StringAttribute(URLAttribute, r.URL.String()), - trace.StringAttribute(HostAttribute, r.Host), - trace.StringAttribute(MethodAttribute, r.Method), - ) - - if userAgent != "" { - attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) - } - - return attrs -} - -func responseAttrs(resp *http.Response) []trace.Attribute { - return []trace.Attribute{ - trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), - } -} - -// TraceStatus is a utility to convert the HTTP status code to a trace.Status that -// represents the outcome as closely as possible. -func TraceStatus(httpStatusCode int, statusLine string) trace.Status { - var code int32 - if httpStatusCode < 200 || httpStatusCode >= 400 { - code = trace.StatusCodeUnknown - } - switch httpStatusCode { - case 499: - code = trace.StatusCodeCancelled - case http.StatusBadRequest: - code = trace.StatusCodeInvalidArgument - case http.StatusUnprocessableEntity: - code = trace.StatusCodeInvalidArgument - case http.StatusGatewayTimeout: - code = trace.StatusCodeDeadlineExceeded - case http.StatusNotFound: - code = trace.StatusCodeNotFound - case http.StatusForbidden: - code = trace.StatusCodePermissionDenied - case http.StatusUnauthorized: // 401 is actually unauthenticated. - code = trace.StatusCodeUnauthenticated - case http.StatusTooManyRequests: - code = trace.StatusCodeResourceExhausted - case http.StatusNotImplemented: - code = trace.StatusCodeUnimplemented - case http.StatusServiceUnavailable: - code = trace.StatusCodeUnavailable - case http.StatusOK: - code = trace.StatusCodeOK - case http.StatusConflict: - code = trace.StatusCodeAlreadyExists - } - - return trace.Status{Code: code, Message: codeToStr[code]} -} - -var codeToStr = map[int32]string{ - trace.StatusCodeOK: `OK`, - trace.StatusCodeCancelled: `CANCELLED`, - trace.StatusCodeUnknown: `UNKNOWN`, - trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, - trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, - trace.StatusCodeNotFound: `NOT_FOUND`, - trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, - trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, - trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, - trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, - trace.StatusCodeAborted: `ABORTED`, - trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, - trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, - trace.StatusCodeInternal: `INTERNAL`, - trace.StatusCodeUnavailable: `UNAVAILABLE`, - trace.StatusCodeDataLoss: `DATA_LOSS`, - trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, -} - -func isHealthEndpoint(path string) bool { - // Health checking is pretty frequent and - // traces collected for health endpoints - // can be extremely noisy and expensive. - // Disable canonical health checking endpoints - // like /healthz and /_ah/health for now. 
- if path == "/healthz" || path == "/_ah/health" { - return true - } - return false -} diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go deleted file mode 100644 index 7d75cae2b18..00000000000 --- a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ochttp - -import ( - "io" -) - -// wrappedBody returns a wrapped version of the original -// Body and only implements the same combination of additional -// interfaces as the original. -func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { - var ( - wr, i0 = body.(io.Writer) - ) - switch { - case !i0: - return struct { - io.ReadCloser - }{wrapper} - - case i0: - return struct { - io.ReadCloser - io.Writer - }{wrapper, wr} - default: - return struct { - io.ReadCloser - }{wrapper} - } -} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go deleted file mode 100644 index b1764e1d3b9..00000000000 --- a/vendor/go.opencensus.io/resource/resource.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package resource provides functionality for resource, which captures -// identifying information about the entities for which signals are exported. -package resource - -import ( - "context" - "fmt" - "os" - "regexp" - "sort" - "strconv" - "strings" -) - -// Environment variables used by FromEnv to decode a resource. -const ( - EnvVarType = "OC_RESOURCE_TYPE" - EnvVarLabels = "OC_RESOURCE_LABELS" -) - -// Resource describes an entity about which identifying information and metadata is exposed. -// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. -type Resource struct { - Type string - Labels map[string]string -} - -// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable.
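// For example (an illustrative sketch, not part of the original file): keys are
// sorted and values quoted with strconv.Quote, so
//
//	EncodeLabels(map[string]string{"pod": "api-1", "ns": "default"})
//
// returns `ns="default",pod="api-1"`.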
-func EncodeLabels(labels map[string]string) string { - sortedKeys := make([]string, 0, len(labels)) - for k := range labels { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - s := "" - for i, k := range sortedKeys { - if i > 0 { - s += "," - } - s += k + "=" + strconv.Quote(labels[k]) - } - return s -} - -var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) - -// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. -// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted. -// Domain names and paths are accepted as label keys. -// Most users will want to use FromEnv instead. -func DecodeLabels(s string) (map[string]string, error) { - m := map[string]string{} - // Ensure a trailing comma, which allows us to keep the regex simpler - s = strings.TrimRight(strings.TrimSpace(s), ",") + "," - - for len(s) > 0 { - match := labelRegex.FindStringSubmatch(s) - if len(match) == 0 { - return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) - } - v := match[2] - if v == "" { - v = match[3] - } else { - var err error - if v, err = strconv.Unquote(v); err != nil { - return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) - } - } - m[match[1]] = v - - s = s[len(match[0]):] - } - return m, nil -} - -// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE -// and OC_RESOURCE_LABELS environment variables. -func FromEnv(context.Context) (*Resource, error) { - res := &Resource{ - Type: strings.TrimSpace(os.Getenv(EnvVarType)), - } - labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) - if labels == "" { - return res, nil - } - var err error - if res.Labels, err = DecodeLabels(labels); err != nil { - return nil, err - } - return res, nil -} - -var _ Detector = FromEnv - -// merge resource information from b into a. In case of a collision, a takes precedence. -func merge(a, b *Resource) *Resource { - if a == nil { - return b - } - if b == nil { - return a - } - res := &Resource{ - Type: a.Type, - Labels: map[string]string{}, - } - if res.Type == "" { - res.Type = b.Type - } - for k, v := range b.Labels { - res.Labels[k] = v - } - // Labels from resource a overwrite labels from resource b. - for k, v := range a.Labels { - res.Labels[k] = v - } - return res -} - -// Detector attempts to detect resource information. -// If the detector cannot find resource information, the returned resource is nil but no -// error is returned. -// An error is only returned on unexpected failures. -type Detector func(context.Context) (*Resource, error) - -// MultiDetector returns a Detector that calls all input detectors in order and -// merges each result with the previous one. In case a type or label key is already set, -// the first set value takes precedence. -// It returns on the first error that a sub-detector encounters. -func MultiDetector(detectors ...Detector) Detector { - return func(ctx context.Context) (*Resource, error) { - return detectAll(ctx, detectors...) - } -} - -// detectAll calls all input detectors sequentially and merges each result with the previous one. -// It returns on the first error that a sub-detector encounters.
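// For example (hypothetical usage; someCloudDetector is not part of this
// package):
//
//	res, err := detectAll(ctx, FromEnv, someCloudDetector)
//
// returns FromEnv's resource merged over someCloudDetector's result, with
// values from earlier detectors taking precedence on conflicts.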
-func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { - var res *Resource - for _, d := range detectors { - r, err := d(ctx) - if err != nil { - return nil, err - } - res = merge(res, r) - } - return res, nil -} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go deleted file mode 100644 index 31477a464fd..00000000000 --- a/vendor/go.opencensus.io/stats/doc.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package stats contains support for OpenCensus stats recording. - -OpenCensus allows users to create typed measures, record measurements, -aggregate the collected data, and export the aggregated data. - -# Measures - -A measure represents a type of data point to be tracked and recorded. -For example, latency, request Mb/s, and response Mb/s are measures -to collect from a server. - -Measure constructors such as Int64 and Float64 automatically -register the measure by the given name. Each registered measure needs -to be unique by name. Measures also have a description and a unit. - -Libraries can define and export measures. Application authors can then -create views and collect and break down measures by the tags they are -interested in. - -# Recording measurements - -Measurement is a data point to be collected for a measure. For example, -for a latency (ms) measure, 100 is a measurement that represents a 100ms -latency event. Measurements are created from measures with -the current context. Tags from the current context are recorded with the -measurements if there are any. - -Recorded measurements are dropped immediately if no views are registered for them. -There is usually no need to conditionally enable and disable -recording to reduce cost. Recording of measurements is cheap. - -Libraries can always record measurements, and applications can later decide -on which measurements they want to collect by registering views. This allows -libraries to turn on the instrumentation by default. - -# Exemplars - -For a given recorded measurement, the associated exemplar is a diagnostic map -that gives more information about the measurement. - -When aggregated using a Distribution aggregation, an exemplar is kept for each -bucket in the Distribution. This allows you to easily find an example of a -measurement that fell into each bucket. - -For example, if you also use the OpenCensus trace package and you -record a measurement with a context that contains a sampled trace span, -then the trace span will be added to the exemplar associated with the measurement. - -When exported to a supporting back end, you should be able to easily navigate -to example traces that fell into each bucket in the Distribution.
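# Example

A minimal sketch of defining a measure and recording against it (the names
below are illustrative, not part of this package):

	var latencyMs = stats.Float64("example.org/measure/latency", "request latency", "ms")

	func handle(ctx context.Context) {
		start := time.Now()
		// ... handle the request ...
		stats.Record(ctx, latencyMs.M(float64(time.Since(start).Milliseconds())))
	}

Nothing is exported until a view over the measure is registered.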
-*/ -package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go deleted file mode 100644 index 436dc791f83..00000000000 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "go.opencensus.io/tag" -) - -// DefaultRecorder will be called for each Record call. -var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) - -// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but -// avoids interface{} conversion. -// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, -// but is interface{} here to avoid import loops -var MeasurementRecorder interface{} - -// SubscriptionReporter reports when a view subscribed with a measure. -var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go deleted file mode 100644 index 1ffd3cefc73..00000000000 --- a/vendor/go.opencensus.io/stats/measure.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -import ( - "sync" - "sync/atomic" -) - -// Measure represents a single numeric value to be tracked and recorded. -// For example, latency, request bytes, and response bytes could be measures -// to collect from a server. -// -// Measures by themselves have no outside effects. In order to be exported, -// the measure needs to be used in a View. If no Views are defined over a -// measure, there is very little cost in recording it. -type Measure interface { - // Name returns the name of this measure. - // - // Measure names are globally unique (among all libraries linked into your program). - // We recommend prefixing the measure name with a domain name relevant to your - // project or application. - // - // Measure names are never sent over the wire or exported to backends. - // They are only used to create Views. - Name() string - - // Description returns the human-readable description of this measure. - Description() string - - // Unit returns the units for the values this measure takes on. 
- // - // Units are encoded according to the case-sensitive abbreviations from the - // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html - Unit() string -} - -// measureDescriptor is the untyped descriptor associated with each measure. -// Int64Measure and Float64Measure wrap measureDescriptor to provide typed -// recording APIs. -// Two Measures with the same name will have the same measureDescriptor. -type measureDescriptor struct { - subs int32 // access atomically - - name string - description string - unit string -} - -func (m *measureDescriptor) subscribe() { - atomic.StoreInt32(&m.subs, 1) -} - -func (m *measureDescriptor) subscribed() bool { - return atomic.LoadInt32(&m.subs) == 1 -} - -var ( - mu sync.RWMutex - measures = make(map[string]*measureDescriptor) -) - -func registerMeasureHandle(name, desc, unit string) *measureDescriptor { - mu.Lock() - defer mu.Unlock() - - if stored, ok := measures[name]; ok { - return stored - } - m := &measureDescriptor{ - name: name, - description: desc, - unit: unit, - } - measures[name] = m - return m -} - -// Measurement is the numeric value measured when recording stats. Each measure -// provides methods to create measurements of their kind. For example, Int64Measure -// provides M to convert an int64 into a measurement. -type Measurement struct { - v float64 - m Measure - desc *measureDescriptor -} - -// Value returns the value of the Measurement as a float64. -func (m Measurement) Value() float64 { - return m.v -} - -// Measure returns the Measure from which this Measurement was created. -func (m Measurement) Measure() Measure { - return m.m -} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go deleted file mode 100644 index f02c1eda845..00000000000 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Float64Measure is a measure for float64 values. -type Float64Measure struct { - desc *measureDescriptor -} - -// M creates a new float64 measurement. -// Use Record to record measurements. -func (m *Float64Measure) M(v float64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: v, - } -} - -// Float64 creates a new measure for float64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. -func Float64(name, description, unit string) *Float64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Float64Measure{mi} -} - -// Name returns the name of the measure. -func (m *Float64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Float64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. 
-func (m *Float64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go deleted file mode 100644 index d101d797358..00000000000 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Int64Measure is a measure for int64 values. -type Int64Measure struct { - desc *measureDescriptor -} - -// M creates a new int64 measurement. -// Use Record to record measurements. -func (m *Int64Measure) M(v int64) Measurement { - return Measurement{ - m: m, - desc: m.desc, - v: float64(v), - } -} - -// Int64 creates a new measure for int64 values. -// -// See the documentation for interface Measure for more guidance on the -// parameters of this function. -func Int64(name, description, unit string) *Int64Measure { - mi := registerMeasureHandle(name, description, unit) - return &Int64Measure{mi} -} - -// Name returns the name of the measure. -func (m *Int64Measure) Name() string { - return m.desc.name -} - -// Description returns the description of the measure. -func (m *Int64Measure) Description() string { - return m.desc.description -} - -// Unit returns the unit of the measure. -func (m *Int64Measure) Unit() string { - return m.desc.unit -} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go deleted file mode 100644 index 8b5b99803ce..00000000000 --- a/vendor/go.opencensus.io/stats/record.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -import ( - "context" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats/internal" - "go.opencensus.io/tag" -) - -func init() { - internal.SubscriptionReporter = func(measure string) { - mu.Lock() - measures[measure].subscribe() - mu.Unlock() - } -} - -// Recorder provides an interface for exporting measurement information from -// the static Record method by using the WithRecorder option. -type Recorder interface { - // Record records a set of measurements associated with the given tags and attachments. - // The second argument is a `[]Measurement`. 
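	// A hypothetical implementation (for tests, say; not part of this
	// package) might look like:
	//
	//	type captureRecorder struct{ got []stats.Measurement }
	//
	//	func (c *captureRecorder) Record(_ *tag.Map, ms interface{}, _ map[string]interface{}) {
	//		c.got = append(c.got, ms.([]stats.Measurement)...)
	//	}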
- Record(*tag.Map, interface{}, map[string]interface{}) -} - -type recordOptions struct { - attachments metricdata.Attachments - mutators []tag.Mutator - measurements []Measurement - recorder Recorder -} - -// WithAttachments applies provided exemplar attachments. -func WithAttachments(attachments metricdata.Attachments) Options { - return func(ro *recordOptions) { - ro.attachments = attachments - } -} - -// WithTags applies provided tag mutators. -func WithTags(mutators ...tag.Mutator) Options { - return func(ro *recordOptions) { - ro.mutators = mutators - } -} - -// WithMeasurements applies provided measurements. -func WithMeasurements(measurements ...Measurement) Options { - return func(ro *recordOptions) { - ro.measurements = measurements - } -} - -// WithRecorder records the measurements to the specified `Recorder`, rather -// than to the global metrics recorder. -func WithRecorder(meter Recorder) Options { - return func(ro *recordOptions) { - ro.recorder = meter - } -} - -// Options apply changes to recordOptions. -type Options func(*recordOptions) - -func createRecordOption(ros ...Options) *recordOptions { - o := &recordOptions{} - for _, ro := range ros { - ro(o) - } - return o -} - -type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) - -// Record records one or multiple measurements with the same context at once. -// If there are any tags in the context, measurements will be tagged with them. -func Record(ctx context.Context, ms ...Measurement) { - // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality - // (RecordOptions) we can reduce some allocations to speed up this hot path - if len(ms) == 0 { - return - } - recorder := internal.MeasurementRecorder.(measurementRecorder) - record := false - for _, m := range ms { - if m.desc.subscribed() { - record = true - break - } - } - if !record { - return - } - recorder(tag.FromContext(ctx), ms, nil) - return -} - -// RecordWithTags records one or multiple measurements at once. -// -// Measurements will be tagged with the tags in the context mutated by the mutators. -// RecordWithTags is useful if you want to record with tag mutations but don't want -// to propagate the mutations in the context. -func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { - return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) -} - -// RecordWithOptions records measurements from the given options (if any) against context -// and tags and attachments in the options (if any). -// If there are any tags in the context, measurements will be tagged with them. -func RecordWithOptions(ctx context.Context, ros ...Options) error { - o := createRecordOption(ros...) 
- if len(o.measurements) == 0 { - return nil - } - recorder := internal.DefaultRecorder - if o.recorder != nil { - recorder = o.recorder.Record - } - if recorder == nil { - return nil - } - record := false - for _, m := range o.measurements { - if m.desc.subscribed() { - record = true - break - } - } - if !record { - return nil - } - if len(o.mutators) > 0 { - var err error - if ctx, err = tag.New(ctx, o.mutators...); err != nil { - return err - } - } - recorder(tag.FromContext(ctx), o.measurements, o.attachments) - return nil -} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go deleted file mode 100644 index 736399652cc..00000000000 --- a/vendor/go.opencensus.io/stats/units.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package stats - -// Units are encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -const ( - UnitNone = "1" // Deprecated: Use UnitDimensionless. - UnitDimensionless = "1" - UnitBytes = "By" - UnitMilliseconds = "ms" - UnitSeconds = "s" -) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go deleted file mode 100644 index 61f72d20da3..00000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import "time" - -// AggType represents the type of aggregation function used on a View. -type AggType int - -// All available aggregation types. -const ( - AggTypeNone AggType = iota // no aggregation; reserved for future use. - AggTypeCount // the count aggregation, see Count. - AggTypeSum // the sum aggregation, see Sum. - AggTypeDistribution // the distribution aggregation, see Distribution. - AggTypeLastValue // the last value aggregation, see LastValue. -) - -func (t AggType) String() string { - return aggTypeName[t] -} - -var aggTypeName = map[AggType]string{ - AggTypeNone: "None", - AggTypeCount: "Count", - AggTypeSum: "Sum", - AggTypeDistribution: "Distribution", - AggTypeLastValue: "LastValue", -} - -// Aggregation represents a data aggregation method. Use one of the functions: -// Count, Sum, or Distribution to construct an Aggregation. -type Aggregation struct { - Type AggType // Type is the AggType of this Aggregation. 
- Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. - - newData func(time.Time) AggregationData -} - -var ( - aggCount = &Aggregation{ - Type: AggTypeCount, - newData: func(t time.Time) AggregationData { - return &CountData{Start: t} - }, - } - aggSum = &Aggregation{ - Type: AggTypeSum, - newData: func(t time.Time) AggregationData { - return &SumData{Start: t} - }, - } -) - -// Count indicates that data collected and aggregated -// with this method will be turned into a count value. -// For example, total number of accepted requests can be -// aggregated by using Count. -func Count() *Aggregation { - return aggCount -} - -// Sum indicates that data collected and aggregated -// with this method will be summed up. -// For example, accumulated request bytes can be aggregated by using -// Sum. -func Sum() *Aggregation { - return aggSum -} - -// Distribution indicates that the desired aggregation is -// a histogram distribution. -// -// A distribution aggregation may contain a histogram of the values in the -// population. The bucket boundaries for that histogram are described -// by the bounds. This defines len(bounds)+1 buckets. -// -// If len(bounds) >= 2 then the boundaries for bucket index i are: -// -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length -// -// If len(bounds) is 0 then there is no histogram associated with the -// distribution. There will be a single bucket with boundaries -// (-infinity, +infinity). -// -// If len(bounds) is 1 then there are no finite buckets, and that single -// element is the common boundary of the overflow and underflow buckets. -func Distribution(bounds ...float64) *Aggregation { - agg := &Aggregation{ - Type: AggTypeDistribution, - Buckets: bounds, - } - agg.newData = func(t time.Time) AggregationData { - return newDistributionData(agg, t) - } - return agg -} - -// LastValue only reports the last value recorded using this -// aggregation. All other measurements will be dropped. -func LastValue() *Aggregation { - return &Aggregation{ - Type: AggTypeLastValue, - newData: func(_ time.Time) AggregationData { - return &LastValueData{} - }, - } -} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go deleted file mode 100644 index d93b520662d..00000000000 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "math" - "time" - - "go.opencensus.io/metric/metricdata" -) - -// AggregationData represents an aggregated value from a collection. -// They are reported on the view data during exporting. -// Most users won't directly access aggregation data.
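// As an illustrative note (restating the Distribution docs above): an
// Aggregation built with Distribution(10, 100) yields DistributionData with
// three buckets, [-infinity, 10), [10, 100), and [100, +infinity).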
-type AggregationData interface { - isAggregationData() bool - addSample(v float64, attachments map[string]interface{}, t time.Time) - clone() AggregationData - equal(other AggregationData) bool - toPoint(t metricdata.Type, time time.Time) metricdata.Point - StartTime() time.Time -} - -const epsilon = 1e-9 - -// CountData is the aggregated data for the Count aggregation. -// A count aggregation processes data and counts the recordings. -// -// Most users won't directly access count data. -type CountData struct { - Start time.Time - Value int64 -} - -func (a *CountData) isAggregationData() bool { return true } - -func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { - a.Value = a.Value + 1 -} - -func (a *CountData) clone() AggregationData { - return &CountData{Value: a.Value, Start: a.Start} -} - -func (a *CountData) equal(other AggregationData) bool { - a2, ok := other.(*CountData) - if !ok { - return false - } - - return a.Start.Equal(a2.Start) && a.Value == a2.Value -} - -func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by CountData. -func (a *CountData) StartTime() time.Time { - return a.Start -} - -// SumData is the aggregated data for the Sum aggregation. -// A sum aggregation processes data and sums up the recordings. -// -// Most users won't directly access sum data. -type SumData struct { - Start time.Time - Value float64 -} - -func (a *SumData) isAggregationData() bool { return true } - -func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - a.Value += v -} - -func (a *SumData) clone() AggregationData { - return &SumData{Value: a.Value, Start: a.Start} -} - -func (a *SumData) equal(other AggregationData) bool { - a2, ok := other.(*SumData) - if !ok { - return false - } - return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon -} - -func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeInt64: - return metricdata.NewInt64Point(t, int64(a.Value)) - case metricdata.TypeCumulativeFloat64: - return metricdata.NewFloat64Point(t, a.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by SumData. -func (a *SumData) StartTime() time.Time { - return a.Start -} - -// DistributionData is the aggregated data for the -// Distribution aggregation. -// -// Most users won't directly access distribution data. -// -// For a distribution with N bounds, the associated DistributionData will have -// N+1 buckets. -type DistributionData struct { - Count int64 // number of data points aggregated - Min float64 // minimum value in the distribution - Max float64 // max value in the distribution - Mean float64 // mean of the distribution - SumOfSquaredDev float64 // sum of the squared deviation from the mean - CountPerBucket []int64 // number of occurrences per bucket - // ExemplarsPerBucket is a slice the same length as CountPerBucket containing - // an exemplar for the associated bucket, or nil.
- ExemplarsPerBucket []*metricdata.Exemplar - bounds []float64 // histogram distribution of the values - Start time.Time -} - -func newDistributionData(agg *Aggregation, t time.Time) *DistributionData { - bucketCount := len(agg.Buckets) + 1 - return &DistributionData{ - CountPerBucket: make([]int64, bucketCount), - ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), - bounds: agg.Buckets, - Min: math.MaxFloat64, - Max: math.SmallestNonzeroFloat64, - Start: t, - } -} - -// Sum returns the sum of all samples collected. -func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } - -func (a *DistributionData) variance() float64 { - if a.Count <= 1 { - return 0 - } - return a.SumOfSquaredDev / float64(a.Count-1) -} - -func (a *DistributionData) isAggregationData() bool { return true } - -// TODO(songy23): support exemplar attachments. -func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { - if v < a.Min { - a.Min = v - } - if v > a.Max { - a.Max = v - } - a.Count++ - a.addToBucket(v, attachments, t) - - if a.Count == 1 { - a.Mean = v - return - } - - oldMean := a.Mean - a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) -} - -func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { - var count *int64 - var i int - var b float64 - for i, b = range a.bounds { - if v < b { - count = &a.CountPerBucket[i] - break - } - } - if count == nil { // Last bucket. - i = len(a.bounds) - count = &a.CountPerBucket[i] - } - *count++ - if exemplar := getExemplar(v, attachments, t); exemplar != nil { - a.ExemplarsPerBucket[i] = exemplar - } -} - -func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { - if len(attachments) == 0 { - return nil - } - return &metricdata.Exemplar{ - Value: v, - Timestamp: t, - Attachments: attachments, - } -} - -func (a *DistributionData) clone() AggregationData { - c := *a - c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) - c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) - return &c -} - -func (a *DistributionData) equal(other AggregationData) bool { - a2, ok := other.(*DistributionData) - if !ok { - return false - } - if a2 == nil { - return false - } - if len(a.CountPerBucket) != len(a2.CountPerBucket) { - return false - } - for i := range a.CountPerBucket { - if a.CountPerBucket[i] != a2.CountPerBucket[i] { - return false - } - } - return a.Start.Equal(a2.Start) && - a.Count == a2.Count && - a.Min == a2.Min && - a.Max == a2.Max && - math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon -} - -func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeCumulativeDistribution: - buckets := []metricdata.Bucket{} - for i := 0; i < len(a.CountPerBucket); i++ { - buckets = append(buckets, metricdata.Bucket{ - Count: a.CountPerBucket[i], - Exemplar: a.ExemplarsPerBucket[i], - }) - } - bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} - - val := &metricdata.Distribution{ - Count: a.Count, - Sum: a.Sum(), - SumOfSquaredDeviation: a.SumOfSquaredDev, - BucketOptions: bucketOptions, - Buckets: buckets, - } - return metricdata.NewDistributionPoint(t, val) - - default: - // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. 
- panic("unsupported metricdata.Type") - } -} - -// StartTime returns the start time of the data being aggregated by DistributionData. -func (a *DistributionData) StartTime() time.Time { - return a.Start -} - -// LastValueData returns the last value recorded for LastValue aggregation. -type LastValueData struct { - Value float64 -} - -func (l *LastValueData) isAggregationData() bool { - return true -} - -func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { - l.Value = v -} - -func (l *LastValueData) clone() AggregationData { - return &LastValueData{l.Value} -} - -func (l *LastValueData) equal(other AggregationData) bool { - a2, ok := other.(*LastValueData) - if !ok { - return false - } - return l.Value == a2.Value -} - -func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { - switch metricType { - case metricdata.TypeGaugeInt64: - return metricdata.NewInt64Point(t, int64(l.Value)) - case metricdata.TypeGaugeFloat64: - return metricdata.NewFloat64Point(t, l.Value) - default: - panic("unsupported metricdata.Type") - } -} - -// StartTime returns an empty time value as start time is not recorded when using last value -// aggregation. -func (l *LastValueData) StartTime() time.Time { - return time.Time{} -} - -// ClearStart clears the Start field from data if present. Useful for testing in cases where the -// start time will be nondeterministic. -func ClearStart(data AggregationData) { - switch data := data.(type) { - case *CountData: - data.Start = time.Time{} - case *SumData: - data.Start = time.Time{} - case *DistributionData: - data.Start = time.Time{} - } -} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go deleted file mode 100644 index bcd6e08c748..00000000000 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "sort" - "time" - - "go.opencensus.io/internal/tagencoding" - "go.opencensus.io/tag" -) - -type collector struct { - // signatures holds the aggregations values for each unique tag signature - // (values for all keys) to its aggregator. - signatures map[string]AggregationData - // Aggregation is the description of the aggregation to perform for this - // view. - a *Aggregation -} - -func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { - aggregator, ok := c.signatures[s] - if !ok { - aggregator = c.a.newData(t) - c.signatures[s] = aggregator - } - aggregator.addSample(v, attachments, t) -} - -// collectRows returns a snapshot of the collected Row values. 
-func (c *collector) collectedRows(keys []tag.Key) []*Row { - rows := make([]*Row, 0, len(c.signatures)) - for sig, aggregator := range c.signatures { - tags := decodeTags([]byte(sig), keys) - row := &Row{Tags: tags, Data: aggregator.clone()} - rows = append(rows, row) - } - return rows -} - -func (c *collector) clearRows() { - c.signatures = make(map[string]AggregationData) -} - -// encodeWithKeys encodes the map by using values -// only associated with the keys provided. -func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { - // Compute the buffer length we will need ahead of time to avoid resizing later - reqLen := 0 - for _, k := range keys { - s, _ := m.Value(k) - // We will store each value + its length - reqLen += len(s) + 1 - } - vb := &tagencoding.Values{ - Buffer: make([]byte, reqLen), - } - for _, k := range keys { - v, _ := m.Value(k) - vb.WriteValue([]byte(v)) - } - return vb.Bytes() -} - -// decodeTags decodes tags from the buffer and -// orders them by the keys. -func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { - vb := &tagencoding.Values{Buffer: buf} - var tags []tag.Tag - for _, k := range keys { - v := vb.ReadValue() - if v != nil { - tags = append(tags, tag.Tag{Key: k, Value: string(v)}) - } - } - vb.ReadIndex = 0 - sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) - return tags -} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go deleted file mode 100644 index 60bf0e39254..00000000000 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// Package view contains support for collecting and exposing aggregates over stats. -// -// In order to collect measurements, views need to be defined and registered. -// A view allows recorded measurements to be filtered and aggregated. -// -// All recorded measurements can be grouped by a list of tags. -// -// OpenCensus provides several aggregation methods: Count, Distribution, Sum, and LastValue. -// -// Count only counts the number of measurement points recorded. -// Distribution provides a statistical summary of the aggregated data by counting -// how many recorded measurements fall into each bucket. -// Sum adds up the measurement values. -// LastValue just keeps track of the most recently recorded measurement value. -// All aggregations are cumulative. -// -// Views can be registered and unregistered at any time during program execution. -// -// Libraries can define views but it is recommended that in most cases registering -// views be left up to applications. -// -// # Exporting -// -// Collected and aggregated data can be exported to a metric collection -// backend by registering its exporter. -// -// Multiple exporters can be registered to upload the data to various -// different back ends.
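//
// For example, a registration sketch (the view name and the latencyMs measure
// are illustrative, not part of this package):
//
//	v := &view.View{
//		Name:        "example.org/views/latency",
//		Measure:     latencyMs,
//		Aggregation: view.Distribution(5, 10, 25, 50, 100),
//	}
//	if err := view.Register(v); err != nil {
//		log.Fatal(err)
//	}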
-package view // import "go.opencensus.io/stats/view" - -// TODO(acetechnologist): Add a link to the language independent OpenCensus -// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go deleted file mode 100644 index 73ba11f5b6e..00000000000 --- a/vendor/go.opencensus.io/stats/view/export.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package view - -// Exporter exports the collected records as view data. -// -// The ExportView method should return quickly; if an -// Exporter takes a significant amount of time to -// process a Data, that work should be done on another goroutine. -// -// It is safe to assume that ExportView will not be called concurrently from -// multiple goroutines. -// -// The Data should not be modified. -type Exporter interface { - ExportView(viewData *Data) -} - -// RegisterExporter registers an exporter. -// Collected data will be reported via all the -// registered exporters. Once you no longer -// want data to be exported, invoke UnregisterExporter -// with the previously registered exporter. -// -// Binaries can register exporters, libraries shouldn't register exporters. -func RegisterExporter(e Exporter) { - defaultWorker.RegisterExporter(e) -} - -// UnregisterExporter unregisters an exporter. -func UnregisterExporter(e Exporter) { - defaultWorker.UnregisterExporter(e) -} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go deleted file mode 100644 index 293b54ecbed..00000000000 --- a/vendor/go.opencensus.io/stats/view/view.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package view - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "sort" - "sync/atomic" - "time" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats" - "go.opencensus.io/tag" -) - -// View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Register function before data will be -// collected and sent to Exporters. -type View struct { - Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. - Description string // Description is a human-readable description for this view. - - // TagKeys are the tag keys describing the grouping of this view. 
- // A single Row will be produced for each combination of associated tag values. - TagKeys []tag.Key - - // Measure is a stats.Measure to aggregate in this view. - Measure stats.Measure - - // Aggregation is the aggregation function to apply to the set of Measurements. - Aggregation *Aggregation -} - -// WithName returns a copy of the View with a new name. This is useful for -// renaming views to cope with limitations placed on metric names by various -// backends. -func (v *View) WithName(name string) *View { - vNew := *v - vNew.Name = name - return &vNew -} - -// same compares two views and returns true if they represent the same aggregation. -func (v *View) same(other *View) bool { - if v == other { - return true - } - if v == nil { - return false - } - return reflect.DeepEqual(v.Aggregation, other.Aggregation) && - v.Measure.Name() == other.Measure.Name() -} - -// ErrNegativeBucketBounds error returned if histogram contains negative bounds. -// -// Deprecated: this should not be public. -var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") - -// canonicalize canonicalizes v by setting explicit -// defaults for Name and Description and sorting the TagKeys -func (v *View) canonicalize() error { - if v.Measure == nil { - return fmt.Errorf("cannot register view %q: measure not set", v.Name) - } - if v.Aggregation == nil { - return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) - } - if v.Name == "" { - v.Name = v.Measure.Name() - } - if v.Description == "" { - v.Description = v.Measure.Description() - } - if err := checkViewName(v.Name); err != nil { - return err - } - sort.Slice(v.TagKeys, func(i, j int) bool { - return v.TagKeys[i].Name() < v.TagKeys[j].Name() - }) - sort.Float64s(v.Aggregation.Buckets) - for _, b := range v.Aggregation.Buckets { - if b < 0 { - return ErrNegativeBucketBounds - } - } - // drop 0 bucket silently. - v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) - - return nil -} - -func dropZeroBounds(bounds ...float64) []float64 { - for i, bound := range bounds { - if bound > 0 { - return bounds[i:] - } - } - return []float64{} -} - -// viewInternal is the internal representation of a View. -type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. - subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector - metricDescriptor *metricdata.Descriptor -} - -func newViewInternal(v *View) (*viewInternal, error) { - return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, - metricDescriptor: viewToMetricDescriptor(v), - }, nil -} - -func (v *viewInternal) subscribe() { - atomic.StoreUint32(&v.subscribed, 1) -} - -func (v *viewInternal) unsubscribe() { - atomic.StoreUint32(&v.subscribed, 0) -} - -// isSubscribed returns true if the view is exporting -// data by subscription. 
-func (v *viewInternal) isSubscribed() bool { - return atomic.LoadUint32(&v.subscribed) == 1 -} - -func (v *viewInternal) clearRows() { - v.collector.clearRows() -} - -func (v *viewInternal) collectedRows() []*Row { - return v.collector.collectedRows(v.view.TagKeys) -} - -func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { - if !v.isSubscribed() { - return - } - sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val, attachments, t) -} - -// A Data is a set of rows about usage of the single measure associated -// with the given view. Each row is specific to a unique set of tags. -type Data struct { - View *View - Start, End time.Time - Rows []*Row -} - -// Row is the collected value for a specific set of key value pairs a.k.a tags. -type Row struct { - Tags []tag.Tag - Data AggregationData -} - -func (r *Row) String() string { - var buffer bytes.Buffer - buffer.WriteString("{ ") - buffer.WriteString("{ ") - for _, t := range r.Tags { - buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) - } - buffer.WriteString(" }") - buffer.WriteString(fmt.Sprintf("%v", r.Data)) - buffer.WriteString(" }") - return buffer.String() -} - -// Equal returns true if both rows are equal. Tags are expected to be ordered -// by the key name. Even if both rows have the same tags but the tags appear in -// different orders it will return false. -func (r *Row) Equal(other *Row) bool { - if r == other { - return true - } - return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) -} - -const maxNameLength = 255 - -// Returns true if the given string contains only printable characters. -func isPrintable(str string) bool { - for _, r := range str { - if !(r >= ' ' && r <= '~') { - return false - } - } - return true -} - -func checkViewName(name string) error { - if len(name) > maxNameLength { - return fmt.Errorf("view name cannot be larger than %v", maxNameLength) - } - if !isPrintable(name) { - return fmt.Errorf("view name needs to be an ASCII string") - } - return nil -} diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go deleted file mode 100644 index 57d615ec7e1..00000000000 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2019, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package view - -import ( - "time" - - "go.opencensus.io/resource" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/stats" -) - -func getUnit(unit string) metricdata.Unit { - switch unit { - case "1": - return metricdata.UnitDimensionless - case "ms": - return metricdata.UnitMilliseconds - case "By": - return metricdata.UnitBytes - } - return metricdata.UnitDimensionless -} - -func getType(v *View) metricdata.Type { - m := v.Measure - agg := v.Aggregation - - switch agg.Type { - case AggTypeSum: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeDistribution: - return metricdata.TypeCumulativeDistribution - case AggTypeLastValue: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeGaugeInt64 - case *stats.Float64Measure: - return metricdata.TypeGaugeFloat64 - default: - panic("unexpected measure type") - } - case AggTypeCount: - switch m.(type) { - case *stats.Int64Measure: - return metricdata.TypeCumulativeInt64 - case *stats.Float64Measure: - return metricdata.TypeCumulativeInt64 - default: - panic("unexpected measure type") - } - default: - panic("unexpected aggregation type") - } -} - -func getLabelKeys(v *View) []metricdata.LabelKey { - labelKeys := []metricdata.LabelKey{} - for _, k := range v.TagKeys { - labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) - } - return labelKeys -} - -func viewToMetricDescriptor(v *View) *metricdata.Descriptor { - return &metricdata.Descriptor{ - Name: v.Name, - Description: v.Description, - Unit: convertUnit(v), - Type: getType(v), - LabelKeys: getLabelKeys(v), - } -} - -func convertUnit(v *View) metricdata.Unit { - switch v.Aggregation.Type { - case AggTypeCount: - return metricdata.UnitDimensionless - default: - return getUnit(v.Measure.Unit()) - } -} - -func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { - labelValues := []metricdata.LabelValue{} - tagMap := make(map[string]string) - for _, tag := range row.Tags { - tagMap[tag.Key.Name()] = tag.Value - } - - for _, key := range expectedKeys { - if val, ok := tagMap[key.Key]; ok { - labelValues = append(labelValues, metricdata.NewLabelValue(val)) - } else { - labelValues = append(labelValues, metricdata.LabelValue{}) - } - } - return labelValues -} - -func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries { - return &metricdata.TimeSeries{ - Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, - LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), - StartTime: row.Data.StartTime(), - } -} - -func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric { - rows := v.collectedRows() - if len(rows) == 0 { - return nil - } - - ts := []*metricdata.TimeSeries{} - for _, row := range rows { - ts = append(ts, rowToTimeseries(v, row, now)) - } - - m := &metricdata.Metric{ - Descriptor: *v.metricDescriptor, - TimeSeries: ts, - Resource: r, - } - return m -} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go deleted file mode 100644 index 6a79cd8a34c..00000000000 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance 
with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	"go.opencensus.io/resource"
-
-	"go.opencensus.io/metric/metricdata"
-	"go.opencensus.io/metric/metricproducer"
-	"go.opencensus.io/stats"
-	"go.opencensus.io/stats/internal"
-	"go.opencensus.io/tag"
-)
-
-func init() {
-	defaultWorker = NewMeter().(*worker)
-	go defaultWorker.start()
-	internal.DefaultRecorder = record
-	internal.MeasurementRecorder = recordMeasurement
-}
-
-type measureRef struct {
-	measure string
-	views   map[*viewInternal]struct{}
-}
-
-type worker struct {
-	measures       map[string]*measureRef
-	views          map[string]*viewInternal
-	viewStartTimes map[*viewInternal]time.Time
-
-	timer      *time.Ticker
-	c          chan command
-	quit, done chan bool
-	mu         sync.RWMutex
-	r          *resource.Resource
-
-	exportersMu sync.RWMutex
-	exporters   map[Exporter]struct{}
-}
-
-// Meter defines an interface which allows a single process to maintain
-// multiple sets of metrics exports (intended for the advanced case where a
-// single process wants to report metrics about multiple objects, such as
-// multiple databases or HTTP services).
-//
-// Note that this is an advanced use case, and the static functions in this
-// module should cover the common use cases.
-type Meter interface {
-	stats.Recorder
-	// Find returns a registered view associated with this name.
-	// If no registered view is found, nil is returned.
-	Find(name string) *View
-	// Register begins collecting data for the given views.
-	// Once a view is registered, it reports data to the registered exporters.
-	Register(views ...*View) error
-	// Unregister the given views. Data will no longer be exported for these views
-	// after Unregister returns.
-	// It is not necessary to unregister from views you expect to collect for the
-	// duration of your program execution.
-	Unregister(views ...*View)
-	// SetReportingPeriod sets the interval between reporting aggregated views in
-	// the program. If duration is less than or equal to zero, it enables the
-	// default behavior.
-	//
-	// Note: each exporter makes different promises about what the lowest supported
-	// duration is. For example, the Stackdriver exporter recommends a value no
-	// lower than 1 minute. Consult each exporter per your needs.
-	SetReportingPeriod(time.Duration)
-
-	// RegisterExporter registers an exporter.
-	// Collected data will be reported via all the
-	// registered exporters. Once you no longer
-	// want data to be exported, invoke UnregisterExporter
-	// with the previously registered exporter.
-	//
-	// Binaries can register exporters, libraries shouldn't register exporters.
-	RegisterExporter(Exporter)
-	// UnregisterExporter unregisters an exporter.
-	UnregisterExporter(Exporter)
-	// SetResource may be used to set the Resource associated with this registry.
-	// This is intended to be used in cases where a single process exports metrics
-	// for multiple Resources, typically in a multi-tenant situation.
-	SetResource(*resource.Resource)
-
-	// Start causes the Meter to start processing Record calls and aggregating
-	// statistics as well as exporting data.
-	Start()
-	// Stop causes the Meter to stop processing calls and terminate data export.
-	Stop()
-
-	// RetrieveData gets a snapshot of the data collected for the view registered
-	// with the given name. It is intended for testing only.
-	RetrieveData(viewName string) ([]*Row, error)
-}
-
-var _ Meter = (*worker)(nil)
-
-var defaultWorker *worker
-
-var defaultReportingDuration = 10 * time.Second
-
-// Find returns a registered view associated with this name.
-// If no registered view is found, nil is returned.
-func Find(name string) (v *View) {
-	return defaultWorker.Find(name)
-}
-
-// Find returns a registered view associated with this name.
-// If no registered view is found, nil is returned.
-func (w *worker) Find(name string) (v *View) {
-	req := &getViewByNameReq{
-		name: name,
-		c:    make(chan *getViewByNameResp),
-	}
-	w.c <- req
-	resp := <-req.c
-	return resp.v
-}
-
-// Register begins collecting data for the given views.
-// Once a view is registered, it reports data to the registered exporters.
-func Register(views ...*View) error {
-	return defaultWorker.Register(views...)
-}
-
-// Register begins collecting data for the given views.
-// Once a view is registered, it reports data to the registered exporters.
-func (w *worker) Register(views ...*View) error {
-	req := &registerViewReq{
-		views: views,
-		err:   make(chan error),
-	}
-	w.c <- req
-	return <-req.err
-}
-
-// Unregister the given views. Data will no longer be exported for these views
-// after Unregister returns.
-// It is not necessary to unregister from views you expect to collect for the
-// duration of your program execution.
-func Unregister(views ...*View) {
-	defaultWorker.Unregister(views...)
-}
-
-// Unregister the given views. Data will no longer be exported for these views
-// after Unregister returns.
-// It is not necessary to unregister from views you expect to collect for the
-// duration of your program execution.
-func (w *worker) Unregister(views ...*View) {
-	names := make([]string, len(views))
-	for i := range views {
-		names[i] = views[i].Name
-	}
-	req := &unregisterFromViewReq{
-		views: names,
-		done:  make(chan struct{}),
-	}
-	w.c <- req
-	<-req.done
-}
-
-// RetrieveData gets a snapshot of the data collected for the view registered
-// with the given name. It is intended for testing only.
-func RetrieveData(viewName string) ([]*Row, error) {
-	return defaultWorker.RetrieveData(viewName)
-}
-
-// RetrieveData gets a snapshot of the data collected for the view registered
-// with the given name. It is intended for testing only.
-func (w *worker) RetrieveData(viewName string) ([]*Row, error) {
-	req := &retrieveDataReq{
-		now: time.Now(),
-		v:   viewName,
-		c:   make(chan *retrieveDataResp),
-	}
-	w.c <- req
-	resp := <-req.c
-	return resp.rows, resp.err
-}
-
-func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
-	defaultWorker.Record(tags, ms, attachments)
-}
-
-func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
-	defaultWorker.recordMeasurement(tags, ms, attachments)
-}
-
-// Record records a set of measurements ms associated with the given tags and attachments.
-func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
-	w.recordMeasurement(tags, ms.([]stats.Measurement), attachments)
-}
-
-// recordMeasurement records a set of measurements ms associated with the given tags and attachments.
-// This is the same as Record but without an interface{} type to avoid allocations -func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { - req := &recordReq{ - tm: tags, - ms: ms, - attachments: attachments, - t: time.Now(), - } - w.c <- req -} - -// SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or equal to zero, it enables the -// default behavior. -// -// Note: each exporter makes different promises about what the lowest supported -// duration is. For example, the Stackdriver exporter recommends a value no -// lower than 1 minute. Consult each exporter per your needs. -func SetReportingPeriod(d time.Duration) { - defaultWorker.SetReportingPeriod(d) -} - -// Stop stops the default worker. -func Stop() { - defaultWorker.Stop() -} - -// SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or equal to zero, it enables the -// default behavior. -// -// Note: each exporter makes different promises about what the lowest supported -// duration is. For example, the Stackdriver exporter recommends a value no -// lower than 1 minute. Consult each exporter per your needs. -func (w *worker) SetReportingPeriod(d time.Duration) { - // TODO(acetechnologist): ensure that the duration d is more than a certain - // value. e.g. 1s - req := &setReportingPeriodReq{ - d: d, - c: make(chan bool), - } - w.c <- req - <-req.c // don't return until the timer is set to the new duration. -} - -// NewMeter constructs a Meter instance. You should only need to use this if -// you need to separate out Measurement recordings and View aggregations within -// a single process. -func NewMeter() Meter { - return &worker{ - measures: make(map[string]*measureRef), - views: make(map[string]*viewInternal), - viewStartTimes: make(map[*viewInternal]time.Time), - timer: time.NewTicker(defaultReportingDuration), - c: make(chan command, 1024), - quit: make(chan bool), - done: make(chan bool), - - exporters: make(map[Exporter]struct{}), - } -} - -// SetResource associates all data collected by this Meter with the specified -// resource. This resource is reported when using metricexport.ReadAndExport; -// it is not provided when used with ExportView/RegisterExporter, because that -// interface does not provide a means for reporting the Resource. 
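The Meter surface deleted in this hunk (NewMeter, Start/Stop, SetReportingPeriod, Register, RetrieveData) composed roughly as in the sketch below. It assumes stats.RecordWithOptions and stats.WithRecorder are used to route measurements to a non-default meter; the measure name is illustrative.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

func main() {
	bytesIn := stats.Int64("demo/bytes_in", "bytes received", "By")
	v := &view.View{Measure: bytesIn, Aggregation: view.Sum()}

	m := view.NewMeter()
	m.Start()
	defer m.Stop()
	m.SetReportingPeriod(30 * time.Second)

	if err := m.Register(v); err != nil {
		log.Fatal(err)
	}

	// Route the measurement to this meter instead of the default worker.
	if err := stats.RecordWithOptions(context.Background(),
		stats.WithRecorder(m),
		stats.WithMeasurements(bytesIn.M(512)),
	); err != nil {
		log.Fatal(err)
	}

	// canonicalize defaulted the view name to the measure name.
	rows, err := m.RetrieveData("demo/bytes_in")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rows)
}
```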
-func (w *worker) SetResource(r *resource.Resource) { - w.r = r -} - -func (w *worker) Start() { - go w.start() -} - -func (w *worker) start() { - prodMgr := metricproducer.GlobalManager() - prodMgr.AddProducer(w) - - for { - select { - case cmd := <-w.c: - cmd.handleCommand(w) - case <-w.timer.C: - w.reportUsage() - case <-w.quit: - w.timer.Stop() - close(w.c) - close(w.done) - return - } - } -} - -func (w *worker) Stop() { - prodMgr := metricproducer.GlobalManager() - prodMgr.DeleteProducer(w) - select { - case <-w.quit: - default: - close(w.quit) - } - <-w.done -} - -func (w *worker) getMeasureRef(name string) *measureRef { - if mr, ok := w.measures[name]; ok { - return mr - } - mr := &measureRef{ - measure: name, - views: make(map[*viewInternal]struct{}), - } - w.measures[name] = mr - return mr -} - -func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { - w.mu.Lock() - defer w.mu.Unlock() - vi, err := newViewInternal(v) - if err != nil { - return nil, err - } - if x, ok := w.views[vi.view.Name]; ok { - if !x.view.same(vi.view) { - return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) - } - - // the view is already registered so there is nothing to do and the - // command is considered successful. - return x, nil - } - w.views[vi.view.Name] = vi - w.viewStartTimes[vi] = time.Now() - ref := w.getMeasureRef(vi.view.Measure.Name()) - ref.views[vi] = struct{}{} - return vi, nil -} - -func (w *worker) unregisterView(v *viewInternal) { - w.mu.Lock() - defer w.mu.Unlock() - delete(w.views, v.view.Name) - delete(w.viewStartTimes, v) - if measure := w.measures[v.view.Measure.Name()]; measure != nil { - delete(measure.views, v) - } -} - -func (w *worker) reportView(v *viewInternal) { - if !v.isSubscribed() { - return - } - rows := v.collectedRows() - viewData := &Data{ - View: v.view, - Start: w.viewStartTimes[v], - End: time.Now(), - Rows: rows, - } - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - for e := range w.exporters { - e.ExportView(viewData) - } -} - -func (w *worker) reportUsage() { - w.mu.Lock() - defer w.mu.Unlock() - for _, v := range w.views { - w.reportView(v) - } -} - -func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { - if !v.isSubscribed() { - return nil - } - - return viewToMetric(v, w.r, now) -} - -// Read reads all view data and returns them as metrics. -// It is typically invoked by metric reader to export stats in metric format. -func (w *worker) Read() []*metricdata.Metric { - w.mu.Lock() - defer w.mu.Unlock() - now := time.Now() - metrics := make([]*metricdata.Metric, 0, len(w.views)) - for _, v := range w.views { - metric := w.toMetric(v, now) - if metric != nil { - metrics = append(metrics, metric) - } - } - return metrics -} - -func (w *worker) RegisterExporter(e Exporter) { - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - - w.exporters[e] = struct{}{} -} - -func (w *worker) UnregisterExporter(e Exporter) { - w.exportersMu.Lock() - defer w.exportersMu.Unlock() - - delete(w.exporters, e) -} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go deleted file mode 100644 index 9ac4cc05992..00000000000 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package view
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-	"time"
-
-	"go.opencensus.io/stats"
-	"go.opencensus.io/stats/internal"
-	"go.opencensus.io/tag"
-)
-
-type command interface {
-	handleCommand(w *worker)
-}
-
-// getViewByNameReq is the command to get a view given its name.
-type getViewByNameReq struct {
-	name string
-	c    chan *getViewByNameResp
-}
-
-type getViewByNameResp struct {
-	v *View
-}
-
-func (cmd *getViewByNameReq) handleCommand(w *worker) {
-	v := w.views[cmd.name]
-	if v == nil {
-		cmd.c <- &getViewByNameResp{nil}
-		return
-	}
-	cmd.c <- &getViewByNameResp{v.view}
-}
-
-// registerViewReq is the command to register a view.
-type registerViewReq struct {
-	views []*View
-	err   chan error
-}
-
-func (cmd *registerViewReq) handleCommand(w *worker) {
-	for _, v := range cmd.views {
-		if err := v.canonicalize(); err != nil {
-			cmd.err <- err
-			return
-		}
-	}
-	var errstr []string
-	for _, view := range cmd.views {
-		vi, err := w.tryRegisterView(view)
-		if err != nil {
-			errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err))
-			continue
-		}
-		internal.SubscriptionReporter(view.Measure.Name())
-		vi.subscribe()
-	}
-	if len(errstr) > 0 {
-		cmd.err <- errors.New(strings.Join(errstr, "\n"))
-	} else {
-		cmd.err <- nil
-	}
-}
-
-// unregisterFromViewReq is the command to unregister from a view. Has no
-// impact on the data collection for clients that are pulling data from the
-// library.
-type unregisterFromViewReq struct {
-	views []string
-	done  chan struct{}
-}
-
-func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
-	for _, name := range cmd.views {
-		vi, ok := w.views[name]
-		if !ok {
-			continue
-		}
-
-		// Report pending data for this view before removing it.
-		w.reportView(vi)
-
-		vi.unsubscribe()
-		if !vi.isSubscribed() {
-			// this was the last subscription and view is not collecting anymore.
-			// The collected data can be cleared.
-			vi.clearRows()
-		}
-		w.unregisterView(vi)
-	}
-	cmd.done <- struct{}{}
-}
-
-// retrieveDataReq is the command to retrieve data for a view.
-type retrieveDataReq struct {
-	now time.Time
-	v   string
-	c   chan *retrieveDataResp
-}
-
-type retrieveDataResp struct {
-	rows []*Row
-	err  error
-}
-
-func (cmd *retrieveDataReq) handleCommand(w *worker) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	vi, ok := w.views[cmd.v]
-	if !ok {
-		cmd.c <- &retrieveDataResp{
-			nil,
-			fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v),
-		}
-		return
-	}
-
-	if !vi.isSubscribed() {
-		cmd.c <- &retrieveDataResp{
-			nil,
-			fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v),
-		}
-		return
-	}
-	cmd.c <- &retrieveDataResp{
-		vi.collectedRows(),
-		nil,
-	}
-}
-
-// recordReq is the command to record data related to multiple measures
-// at once.
-type recordReq struct { - tm *tag.Map - ms []stats.Measurement - attachments map[string]interface{} - t time.Time -} - -func (cmd *recordReq) handleCommand(w *worker) { - w.mu.Lock() - defer w.mu.Unlock() - for _, m := range cmd.ms { - if (m == stats.Measurement{}) { // not registered - continue - } - ref := w.getMeasureRef(m.Measure().Name()) - for v := range ref.views { - v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) - } - } -} - -// setReportingPeriodReq is the command to modify the duration between -// reporting the collected data to the registered clients. -type setReportingPeriodReq struct { - d time.Duration - c chan bool -} - -func (cmd *setReportingPeriodReq) handleCommand(w *worker) { - w.timer.Stop() - if cmd.d <= 0 { - w.timer = time.NewTicker(defaultReportingDuration) - } else { - w.timer = time.NewTicker(cmd.d) - } - cmd.c <- true -} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go deleted file mode 100644 index b27d1b26b13..00000000000 --- a/vendor/go.opencensus.io/tag/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "context" -) - -// FromContext returns the tag map stored in the context. -func FromContext(ctx context.Context) *Map { - // The returned tag map shouldn't be mutated. - ts := ctx.Value(mapCtxKey) - if ts == nil { - return nil - } - return ts.(*Map) -} - -// NewContext creates a new context with the given tag map. -// To propagate a tag map to downstream methods and downstream RPCs, add a tag map -// to the current context. NewContext will return a copy of the current context, -// and put the tag map into the returned one. -// If there is already a tag map in the current context, it will be replaced with m. -func NewContext(ctx context.Context, m *Map) context.Context { - return context.WithValue(ctx, mapCtxKey, m) -} - -type ctxKey struct{} - -var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go deleted file mode 100644 index da16b74e4de..00000000000 --- a/vendor/go.opencensus.io/tag/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -/* -Package tag contains OpenCensus tags. - -Tags are key-value pairs. Tags provide additional cardinality to -the OpenCensus instrumentation data. 
- -Tags can be propagated on the wire and in the same -process via context.Context. Encode and Decode should be -used to represent tags into their binary propagation form. -*/ -package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go deleted file mode 100644 index 71ec913657b..00000000000 --- a/vendor/go.opencensus.io/tag/key.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -// Key represents a tag key. -type Key struct { - name string -} - -// NewKey creates or retrieves a string key identified by name. -// Calling NewKey more than once with the same name returns the same key. -func NewKey(name string) (Key, error) { - if !checkKeyName(name) { - return Key{}, errInvalidKeyName - } - return Key{name: name}, nil -} - -// MustNewKey returns a key with the given name, and panics if name is an invalid key name. -func MustNewKey(name string) Key { - k, err := NewKey(name) - if err != nil { - panic(err) - } - return k -} - -// Name returns the name of the key. -func (k Key) Name() string { - return k.name -} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go deleted file mode 100644 index 0272ef85a4c..00000000000 --- a/vendor/go.opencensus.io/tag/map.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "bytes" - "context" - "fmt" - "sort" -) - -// Tag is a key value pair that can be propagated on wire. -type Tag struct { - Key Key - Value string -} - -type tagContent struct { - value string - m metadatas -} - -// Map is a map of tags. Use New to create a context containing -// a new Map. -type Map struct { - m map[Key]tagContent -} - -// Value returns the value for the key if a value for the key exists. 
-func (m *Map) Value(k Key) (string, bool) {
-	if m == nil {
-		return "", false
-	}
-	v, ok := m.m[k]
-	return v.value, ok
-}
-
-func (m *Map) String() string {
-	if m == nil {
-		return "nil"
-	}
-	keys := make([]Key, 0, len(m.m))
-	for k := range m.m {
-		keys = append(keys, k)
-	}
-	sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() })
-
-	var buffer bytes.Buffer
-	buffer.WriteString("{ ")
-	for _, k := range keys {
-		buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k]))
-	}
-	buffer.WriteString(" }")
-	return buffer.String()
-}
-
-func (m *Map) insert(k Key, v string, md metadatas) {
-	if _, ok := m.m[k]; ok {
-		return
-	}
-	m.m[k] = tagContent{value: v, m: md}
-}
-
-func (m *Map) update(k Key, v string, md metadatas) {
-	if _, ok := m.m[k]; ok {
-		m.m[k] = tagContent{value: v, m: md}
-	}
-}
-
-func (m *Map) upsert(k Key, v string, md metadatas) {
-	m.m[k] = tagContent{value: v, m: md}
-}
-
-func (m *Map) delete(k Key) {
-	delete(m.m, k)
-}
-
-func newMap() *Map {
-	return &Map{m: make(map[Key]tagContent)}
-}
-
-// Mutator modifies a tag map.
-type Mutator interface {
-	Mutate(t *Map) (*Map, error)
-}
-
-// Insert returns a mutator that inserts a
-// value associated with k. If k already exists in the tag map,
-// the mutator doesn't update the value.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Insert(k Key, v string, mds ...Metadata) Mutator {
-	return &mutator{
-		fn: func(m *Map) (*Map, error) {
-			if !checkValue(v) {
-				return nil, errInvalidValue
-			}
-			m.insert(k, v, createMetadatas(mds...))
-			return m, nil
-		},
-	}
-}
-
-// Update returns a mutator that updates the
-// value of the tag associated with k with v. If k doesn't
-// exist in the tag map, the mutator doesn't insert the value.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Update(k Key, v string, mds ...Metadata) Mutator {
-	return &mutator{
-		fn: func(m *Map) (*Map, error) {
-			if !checkValue(v) {
-				return nil, errInvalidValue
-			}
-			m.update(k, v, createMetadatas(mds...))
-			return m, nil
-		},
-	}
-}
-
-// Upsert returns a mutator that upserts the
-// value of the tag associated with k with v. It inserts the
-// value if k doesn't exist already. It mutates the value
-// if k already exists.
-// Metadata applies metadata to the tag. It is optional.
-// Metadatas are applied in the order in which they are provided.
-// If more than one metadata updates the same attribute then
-// the update from the last metadata prevails.
-func Upsert(k Key, v string, mds ...Metadata) Mutator {
-	return &mutator{
-		fn: func(m *Map) (*Map, error) {
-			if !checkValue(v) {
-				return nil, errInvalidValue
-			}
-			m.upsert(k, v, createMetadatas(mds...))
-			return m, nil
-		},
-	}
-}
-
-func createMetadatas(mds ...Metadata) metadatas {
-	var metas metadatas
-	if len(mds) > 0 {
-		for _, md := range mds {
-			if md != nil {
-				md(&metas)
-			}
-		}
-	} else {
-		WithTTL(TTLUnlimitedPropagation)(&metas)
-	}
-	return metas
-
-}
-
-// Delete returns a mutator that deletes
-// the value associated with k.
-func Delete(k Key) Mutator {
-	return &mutator{
-		fn: func(m *Map) (*Map, error) {
-			m.delete(k)
-			return m, nil
-		},
-	}
-}
-
-// New returns a new context that contains a tag map
-// originated from the incoming context and modified
-// with the provided mutators.
-func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
-	m := newMap()
-	orig := FromContext(ctx)
-	if orig != nil {
-		for k, v := range orig.m {
-			if !checkKeyName(k.Name()) {
-				return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
-			}
-			if !checkValue(v.value) {
-				return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
-			}
-			m.insert(k, v.value, v.m)
-		}
-	}
-	var err error
-	for _, mod := range mutator {
-		m, err = mod.Mutate(m)
-		if err != nil {
-			return ctx, err
-		}
-	}
-	return NewContext(ctx, m), nil
-}
-
-// Do is similar to pprof.Do: a convenience for installing the tags
-// from the context as Go profiler labels. This allows you to
-// correlate runtime profiling with stats.
-//
-// It converts the key/values from the given map to Go profiler labels
-// and calls pprof.Do.
-//
-// Do is going to do nothing if your Go version is below 1.9.
-func Do(ctx context.Context, f func(ctx context.Context)) {
-	do(ctx, f)
-}
-
-type mutator struct {
-	fn func(t *Map) (*Map, error)
-}
-
-func (m *mutator) Mutate(t *Map) (*Map, error) {
-	return m.fn(t)
-}
diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go
deleted file mode 100644
index c242e695c8c..00000000000
--- a/vendor/go.opencensus.io/tag/map_codec.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2017, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-import (
-	"encoding/binary"
-	"fmt"
-)
-
-// keyType defines the types of keys allowed. Currently only keyTypeString is
-// supported.
-type keyType byte
-
-const (
-	keyTypeString keyType = iota
-	keyTypeInt64
-	keyTypeTrue
-	keyTypeFalse
-
-	tagsVersionID = byte(0)
-)
-
-type encoderGRPC struct {
-	buf               []byte
-	writeIdx, readIdx int
-}
-
-// writeTagString writes the keyTypeString byte followed by the key string and
-// value string.
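A runnable sketch of the mutator semantics (Insert vs. Update vs. Upsert) and the tag.Do pprof bridge removed above; the key names are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"runtime/pprof"

	"go.opencensus.io/tag"
)

func main() {
	method := tag.MustNewKey("method")
	status := tag.MustNewKey("status")

	ctx, err := tag.New(context.Background(),
		tag.Insert(method, "GET"), // inserts: key not present yet
		tag.Update(status, "200"), // no-op: Update only touches existing keys
		tag.Upsert(status, "500"), // inserts or overwrites unconditionally
	)
	if err != nil {
		panic(err)
	}

	m := tag.FromContext(ctx)
	if v, ok := m.Value(status); ok {
		fmt.Println("status =", v) // status = 500
	}

	// Do installs the tag map as pprof labels, so CPU profiles can be
	// filtered by the same dimensions as the stats (on Go >= 1.9).
	tag.Do(ctx, func(ctx context.Context) {
		v, _ := pprof.Label(ctx, "method")
		fmt.Println("label method =", v) // label method = GET
	})
}
```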
-func (eg *encoderGRPC) writeTagString(k, v string) { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k) - eg.writeStringWithVarintLen(v) -} - -func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { - eg.writeByte(byte(keyTypeInt64)) - eg.writeStringWithVarintLen(k) - eg.writeUint64(i) -} - -func (eg *encoderGRPC) writeTagTrue(k string) { - eg.writeByte(byte(keyTypeTrue)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeTagFalse(k string) { - eg.writeByte(byte(keyTypeFalse)) - eg.writeStringWithVarintLen(k) -} - -func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { - length := len(bytes) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], bytes) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeStringWithVarintLen(s string) { - length := len(s) - - eg.growIfRequired(binary.MaxVarintLen64 + length) - eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) - copy(eg.buf[eg.writeIdx:], s) - eg.writeIdx += length -} - -func (eg *encoderGRPC) writeByte(v byte) { - eg.growIfRequired(1) - eg.buf[eg.writeIdx] = v - eg.writeIdx++ -} - -func (eg *encoderGRPC) writeUint32(i uint32) { - eg.growIfRequired(4) - binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 4 -} - -func (eg *encoderGRPC) writeUint64(i uint64) { - eg.growIfRequired(8) - binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) - eg.writeIdx += 8 -} - -func (eg *encoderGRPC) readByte() byte { - b := eg.buf[eg.readIdx] - eg.readIdx++ - return b -} - -func (eg *encoderGRPC) readUint32() uint32 { - i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) - eg.readIdx += 4 - return i -} - -func (eg *encoderGRPC) readUint64() uint64 { - i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) - eg.readIdx += 8 - return i -} - -func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { - if eg.readEnded() { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) - if valueStart <= 0 { - return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) - } - - valueStart += eg.readIdx - valueEnd := valueStart + int(length) - if valueEnd > len(eg.buf) { - return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) - } - - eg.readIdx = valueEnd - return eg.buf[valueStart:valueEnd], nil -} - -func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { - bytes, err := eg.readBytesWithVarintLen() - if err != nil { - return "", err - } - return string(bytes), nil -} - -func (eg *encoderGRPC) growIfRequired(expected int) { - if len(eg.buf)-eg.writeIdx < expected { - tmp := make([]byte, 2*(len(eg.buf)+1)+expected) - copy(tmp, eg.buf) - eg.buf = tmp - } -} - -func (eg *encoderGRPC) readEnded() bool { - return eg.readIdx >= len(eg.buf) -} - -func (eg *encoderGRPC) bytes() []byte { - return eg.buf[:eg.writeIdx] -} - -// Encode encodes the tag map into a []byte. It is useful to propagate -// the tag maps on wire in binary format. 
-func Encode(m *Map) []byte {
-	if m == nil {
-		return nil
-	}
-	eg := &encoderGRPC{
-		buf: make([]byte, len(m.m)),
-	}
-	eg.writeByte(tagsVersionID)
-	for k, v := range m.m {
-		if v.m.ttl.ttl == valueTTLUnlimitedPropagation {
-			eg.writeByte(byte(keyTypeString))
-			eg.writeStringWithVarintLen(k.name)
-			eg.writeBytesWithVarintLen([]byte(v.value))
-		}
-	}
-	return eg.bytes()
-}
-
-// Decode decodes the given []byte into a tag map.
-func Decode(bytes []byte) (*Map, error) {
-	ts := newMap()
-	err := DecodeEach(bytes, ts.upsert)
-	if err != nil {
-		// no partial failures
-		return nil, err
-	}
-	return ts, nil
-}
-
-// DecodeEach decodes the given serialized tag map, calling handler for each
-// tag key and value decoded.
-func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error {
-	eg := &encoderGRPC{
-		buf: bytes,
-	}
-	if len(eg.buf) == 0 {
-		return nil
-	}
-
-	version := eg.readByte()
-	if version > tagsVersionID {
-		return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID)
-	}
-
-	for !eg.readEnded() {
-		typ := keyType(eg.readByte())
-
-		if typ != keyTypeString {
-			return fmt.Errorf("cannot decode: invalid key type: %q", typ)
-		}
-
-		k, err := eg.readBytesWithVarintLen()
-		if err != nil {
-			return err
-		}
-
-		v, err := eg.readBytesWithVarintLen()
-		if err != nil {
-			return err
-		}
-
-		key, err := NewKey(string(k))
-		if err != nil {
-			return err
-		}
-		val := string(v)
-		if !checkValue(val) {
-			return errInvalidValue
-		}
-		fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation)))
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go
deleted file mode 100644
index 6571a583ea6..00000000000
--- a/vendor/go.opencensus.io/tag/metadata.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019, OpenCensus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package tag
-
-const (
-	// valueTTLNoPropagation prevents tag from propagating.
-	valueTTLNoPropagation = 0
-
-	// valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops.
-	valueTTLUnlimitedPropagation = -1
-)
-
-// TTL is metadata that specifies the number of hops a tag can propagate.
-// Details about TTL metadata are specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata
-type TTL struct {
-	ttl int
-}
-
-var (
-	// TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops.
-	TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation}
-
-	// TTLNoPropagation is TTL metadata that prevents tag from propagating.
-	TTLNoPropagation = TTL{ttl: valueTTLNoPropagation}
-)
-
-type metadatas struct {
-	ttl TTL
-}
-
-// Metadata applies metadatas specified by the function.
-type Metadata func(*metadatas)
-
-// WithTTL applies metadata with the provided TTL.
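The binary codec deleted above round-trips like this; a small sketch using the package-level Encode and Decode (the key and value are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"go.opencensus.io/tag"
)

func main() {
	k := tag.MustNewKey("user")
	ctx, _ := tag.New(context.Background(), tag.Upsert(k, "alice"))

	// Serialize the tag map for propagation, e.g. in a gRPC metadata entry.
	// Only tags with unlimited-propagation TTL (the default) are encoded.
	wire := tag.Encode(tag.FromContext(ctx))

	// Inflate it again on the receiving side.
	m, err := tag.Decode(wire)
	if err != nil {
		panic(err)
	}
	v, _ := m.Value(k)
	fmt.Println(v) // alice
}
```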
-func WithTTL(ttl TTL) Metadata { - return func(m *metadatas) { - m.ttl = ttl - } -} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go deleted file mode 100644 index 8fb17226fe3..00000000000 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package tag - -import ( - "context" - "runtime/pprof" -) - -func do(ctx context.Context, f func(ctx context.Context)) { - m := FromContext(ctx) - keyvals := make([]string, 0, 2*len(m.m)) - for k, v := range m.m { - keyvals = append(keyvals, k.Name(), v.value) - } - pprof.Do(ctx, pprof.Labels(keyvals...), f) -} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go deleted file mode 100644 index e28cf13cde9..00000000000 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package tag - -import "context" - -func do(ctx context.Context, f func(ctx context.Context)) { - f(ctx) -} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go deleted file mode 100644 index 0939fc67483..00000000000 --- a/vendor/go.opencensus.io/tag/validate.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tag - -import "errors" - -const ( - maxKeyLength = 255 - - // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
- validKeyValueMin = 32 - validKeyValueMax = 126 -) - -var ( - errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") - errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") -) - -func checkKeyName(name string) bool { - if len(name) == 0 { - return false - } - if len(name) > maxKeyLength { - return false - } - return isASCII(name) -} - -func isASCII(s string) bool { - for _, c := range s { - if (c < validKeyValueMin) || (c > validKeyValueMax) { - return false - } - } - return true -} - -func checkValue(v string) bool { - if len(v) > maxKeyLength { - return false - } - return isASCII(v) -} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go deleted file mode 100644 index 1eb190a96a3..00000000000 --- a/vendor/go.opencensus.io/trace/propagation/propagation.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package propagation implements the binary trace context format. -package propagation // import "go.opencensus.io/trace/propagation" - -// TODO: link to external spec document. - -// BinaryFormat format: -// -// Binary value: -// version_id: 1 byte representing the version id. -// -// For version_id = 0: -// -// version_format: -// field_format: -// -// Fields: -// -// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. -// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. -// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. -// -// Fields MUST be encoded using the field id order (smaller to higher). -// -// Valid value example: -// -// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, -// 98, 99, 100, 101, 102, 103, 104, 2, 1} -// -// version_id = 0; -// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} -// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; -// trace_options = {1}; - -import ( - "net/http" - - "go.opencensus.io/trace" -) - -// Binary returns the binary format representation of a SpanContext. -// -// If sc is the zero value, Binary returns nil. -func Binary(sc trace.SpanContext) []byte { - if sc == (trace.SpanContext{}) { - return nil - } - var b [29]byte - copy(b[2:18], sc.TraceID[:]) - b[18] = 1 - copy(b[19:27], sc.SpanID[:]) - b[27] = 2 - b[28] = uint8(sc.TraceOptions) - return b[:] -} - -// FromBinary returns the SpanContext represented by b. -// -// If b has an unsupported version ID or contains no TraceID, FromBinary -// returns with ok==false. 
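A sketch of the span-context round trip implemented by Binary and FromBinary (whose body follows just below); the IDs mirror the "valid value example" from the format comment above:

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace"
	"go.opencensus.io/trace/propagation"
)

func main() {
	sc := trace.SpanContext{
		TraceID:      trace.TraceID{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79},
		SpanID:       trace.SpanID{97, 98, 99, 100, 101, 102, 103, 104},
		TraceOptions: trace.TraceOptions(1),
	}

	b := propagation.Binary(sc)
	// 29 bytes: version byte plus trace ID, span ID, and options fields,
	// each preceded by its field ID.
	fmt.Println(len(b)) // 29

	back, ok := propagation.FromBinary(b)
	fmt.Println(ok, back.TraceID == sc.TraceID, back.SpanID == sc.SpanID) // true true true
}
```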
-func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { - if len(b) == 0 || b[0] != 0 { - return trace.SpanContext{}, false - } - b = b[1:] - if len(b) >= 17 && b[0] == 0 { - copy(sc.TraceID[:], b[1:17]) - b = b[17:] - } else { - return trace.SpanContext{}, false - } - if len(b) >= 9 && b[0] == 1 { - copy(sc.SpanID[:], b[1:9]) - b = b[9:] - } - if len(b) >= 2 && b[0] == 2 { - sc.TraceOptions = trace.TraceOptions(b[1]) - } - return sc, true -} - -// HTTPFormat implementations propagate span contexts -// in HTTP requests. -// -// SpanContextFromRequest extracts a span context from incoming -// requests. -// -// SpanContextToRequest modifies the given request to include the given -// span context. -type HTTPFormat interface { - SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) - SpanContextToRequest(sc trace.SpanContext, req *http.Request) -} - -// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md new file mode 100644 index 00000000000..773c9b6431f --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to go.opentelemetry.io/auto/sdk + +The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. +It is designed to be: + +0. An OpenTelemetry compliant SDK +1. Instrumented by auto-instrumentation (serializable into OTLP JSON) +2. Lightweight +3. User-friendly + +These design choices are listed in the order of their importance. + +The primary design goal of this module is to be an OpenTelemetry SDK. +This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. + +Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. +The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. +This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent. + +Outside of these first two goals, the intended use becomes relevant. +This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running. +Because of this, this package needs to not add unnecessary dependencies to that API. +Ideally, it adds none. +It also needs to operate efficiently. + +Finally, this module is designed to be user-friendly to Go development. +It hides complexity in order to provide simpler APIs when the previous goals can all still be met. diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 00000000000..088d19a6ce7 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 00000000000..ad73d8cb9d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. +*/ +package sdk diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go new file mode 100644 index 00000000000..af6ef171f6a --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. +// The passed slice must not be changed after it is passed. 
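+// For example (illustrative usage):
+//
+//	m := Map("user", String("name", "alice"), Int64("id", 42))
+//	_ = m.Value.AsMap() // the two Attr entries above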
+func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal returns if a is equal to b. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go new file mode 100644 index 00000000000..949e2165c05 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides a lightweight representations of OpenTelemetry +telemetry that is compatible with the OTLP JSON protobuf encoding. +*/ +package telemetry diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go new file mode 100644 index 00000000000..e854d7e84e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty returns false if id contains at least one non-zero byte. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty returns true if the span ID contains at least one non-zero byte. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
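+// For example, the quoted form `"00f067aa0ba902b7"` and the bare form
+// `00f067aa0ba902b7` decode to the same 8 bytes, while an empty string
+// leaves dst untouched (illustrative inputs).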
+func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go new file mode 100644 index 00000000000..29e629d6674 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Int64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go new file mode 100644 index 00000000000..cecad8bae3c --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. 
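+// A minimal illustrative payload (the droppedAttributesCount key is also
+// accepted in snake_case):
+//
+//	{"attributes":[{"key":"service.name","value":{"stringValue":"svc"}}]}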
+func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 00000000000..b6f2e28d408 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 00000000000..a13a6b733da --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. 
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
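+				// An empty object leaves s in its zero state.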
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.StartTime = time.Unix(0, int64(val.Uint64())) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.EndTime = time.Unix(0, int64(val.Uint64())) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type SpanKind int32 + +const ( + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SpanKindInternal SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. 
A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
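+	// For example, a link attribute such as "messaging.batch.message_index"
+	// can tie this span to one message of a processed batch (illustrative
+	// attribute name, not mandated by this package).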
+ Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 00000000000..1217776ead1 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. 
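+	// For example, "connection reset by peer" alongside Code set to
+	// StatusCodeError (illustrative).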
+ Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 00000000000..69a348f0f06 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go new file mode 100644 index 00000000000..0dd01b063a3 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go @@ -0,0 +1,452 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate stringer -type=ValueKind -trimprefix=ValueKind + +package telemetry + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. + num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. 
+ bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{num: uint64(v), any: ValueKindInt64} +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. 
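+// It returns 0 when v does not hold a float64; use Kind to tell a stored
+// zero apart from a type mismatch. For example (illustrative):
+//
+//	v := Float64Value(3.14)
+//	_ = v.AsFloat64() // 3.14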
+func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. +func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). 
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + case ValueKindFloat64: + return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) + case ValueKindBool: + return strconv.FormatBool(v.asBool()) + case ValueKindBytes: + return fmt.Sprint(v.asBytes()) + case ValueKindMap: + return fmt.Sprint(v.asMap()) + case ValueKindSlice: + return fmt.Sprint(v.asSlice()) + case ValueKindEmpty: + return "" + default: + // Try to handle this as gracefully as possible. + // + // Don't panic here. The goal here is to have developers find this + // first if a slog.Kind is is not handled. It is + // preferable to have user's open issue asking why their attributes + // have a "unhandled: " prefix than say that their code is panicking. + return fmt.Sprintf("", v.Kind()) + } +} + +// MarshalJSON encodes v into OTLP formatted JSON. +func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. 
Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go new file mode 100644 index 00000000000..86babf1a885 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "log/slog" + "os" + "strconv" +) + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. + Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // The is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. + LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited. + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. 
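+// For example (illustrative): with OTEL_SPAN_EVENT_COUNT_LIMIT=256 this
+// returns 256, while an unset or non-integer value falls back to 128:
+//
+//	limit := firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT")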
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + slog.Warn( + "invalid limit environment variable", + "error", err, + "key", key, + "value", strV, + ) + } + + return defaultVal +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go new file mode 100644 index 00000000000..6ebea12a9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/span.go @@ -0,0 +1,432 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "encoding/json" + "fmt" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type span struct { + noop.Span + + spanContext trace.SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *span) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. 
+ return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. 
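+			// The invalid byte is skipped entirely; it is neither written
+			// nor counted toward the limit.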
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *span) End(opts ...trace.SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *span) end(opts []trace.SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := trace.NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*span) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *span) RecordError(err error, opts ...trace.EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
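+		// Shift the remaining links left by one and shrink the slice so the
+		// append below stays within the limit; the evicted head is the
+		// oldest link and is counted as dropped.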
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 00000000000..cbcfabde3b1 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. 
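+// In production, the eBPF instrumentation attaches to (*tracer).start
+// directly, so this no-op default is only ever replaced by tests.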
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 00000000000..dbc477a59ad --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
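+	// A tracer is a cheap, immutable value: only the instrumentation
+	// identity (name, version, schema URL) is carried over from the config.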
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go index d14cc52ff7a..cf7e9db36bf 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go @@ -5,7 +5,7 @@ package otelhttptrace // import "go.opentelemetry.io/contrib/instrumentation/net // Version is the current release version of the httptrace instrumentation. func Version() string { - return "0.57.0" + return "0.58.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 6aae83bfd20..b25641c55d3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index fb893b25042..3b036f8a37b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -9,6 +9,7 @@ import ( "net/http" "os" "strings" + "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -50,9 +51,9 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) 
+ return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) } - return oldHTTPServer{}.RequestTraceAttrs(server, req) + return OldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -60,14 +61,14 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return OldHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) + return OldHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -102,18 +103,27 @@ type MetricData struct { ElapsedTime float64 } +var metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, +} + func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. + // This will happen if an HTTPServer{} is used instead of NewHTTPServer. return } - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) // TODO: Duplicate Metrics } @@ -124,7 +134,7 @@ func NewHTTPServer(meter metric.Meter) HTTPServer { server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) return server } @@ -142,25 +152,25 @@ func NewHTTPClient(meter metric.Meter) HTTPClient { client := HTTPClient{ duplicate: env == "http/dup", } - client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter) + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. 
func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) } - return oldHTTPClient{}.RequestTraceAttrs(req) + return OldHTTPClient{}.RequestTraceAttrs(req) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) } - return oldHTTPClient{}.ResponseTraceAttrs(resp) + return OldHTTPClient{}.ResponseTraceAttrs(resp) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -175,7 +185,7 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { func (c HTTPClient) ErrorType(err error) attribute.KeyValue { if c.duplicate { - return newHTTPClient{}.ErrorType(err) + return CurrentHTTPClient{}.ErrorType(err) } return attribute.KeyValue{} @@ -195,7 +205,7 @@ func (o MetricOpts) AddOptions() metric.AddOption { } func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { - attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) // TODO: Duplicate Metrics set := metric.WithAttributeSet(attribute.NewSet(attributes...)) return MetricOpts{ @@ -206,7 +216,7 @@ func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { if s.requestBytesCounter == nil || s.latencyMeasure == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). return } @@ -218,7 +228,7 @@ func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts Metri func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { if s.responseBytesCounter == nil { - // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). return } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 745b8c67bc4..dc9ec7bc39e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -14,7 +14,7 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -type newHTTPServer struct{} +type CurrentHTTPServer struct{} // TraceRequest returns trace attributes for an HTTP request received by a // server. @@ -32,18 +32,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. 
-func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. - host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } @@ -59,7 +59,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -104,7 +104,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -135,7 +135,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att return attrs } -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -150,7 +150,7 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconvNew.URLScheme("https") } @@ -160,7 +160,7 @@ func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive // TraceResponse returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int if resp.ReadBytes > 0 { @@ -195,14 +195,14 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (n newHTTPServer) Route(route string) attribute.KeyValue { +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } -type newHTTPClient struct{} +type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
-func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { /* below attributes are returned: - http.request.method @@ -222,7 +222,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -284,7 +284,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. -func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { /* below attributes are returned: - http.response.status_code @@ -311,7 +311,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa return attrs } -func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -328,7 +328,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } -func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e6e14924f57..93e8d0f94c1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -14,14 +14,14 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -// splitHostPort splits a network address hostport of the form "host", +// SplitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. 
-func splitHostPort(hostport string) (host string, port int) { +func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index 5367732ec5d..c042249dd72 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -17,7 +17,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -35,14 +35,14 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { return semconvutil.HTTPServerRequest(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -67,7 +67,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -84,7 +84,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -113,17 +113,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { n := len(additionalAttributes) + 3 var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. 
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -164,24 +164,24 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -type oldHTTPClient struct{} +type OldHTTPClient struct{} -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { return semconvutil.HTTPClientRequest(req) } -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } -func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string http.status_code int @@ -197,7 +197,7 @@ func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit var requestHost string var requestPort int for _, hostport := range []string{h, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -235,7 +235,7 @@ const ( clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds ) -func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 16ef3cb9b94..353e43b91fd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.57.0" + return "0.58.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index dbfb2a165a0..ce3f40b609c 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -22,6 +22,7 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck - tenv @@ -30,6 +31,7 @@ linters: - unconvert - unused - unparam + - usestdlibvars issues: # Maximum issues count per one linter. 
@@ -61,10 +63,11 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec + - perfsprint # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" @@ -95,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -154,6 +164,12 @@ linters-settings: locale: US ignore-words: - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 8f68dbd04ae..a30988f25d0 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,35 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. 
(#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + @@ -3156,7 +3185,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...HEAD +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index b8292a4fb91..a7f6d8cc688 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -235,6 +235,16 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362bb..b8cb605c166 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 36f5367030c..0e1fe242203 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -355,7 +355,7 @@ func parseMember(member string) (Member, error) { } // replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. 
-func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { if utf8.ValidString(unescapeVal) { return unescapeVal } @@ -363,7 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 var b strings.Builder - b.Grow(cap) + b.Grow(c) for i := 0; i < len(unescapeVal); { r, size := utf8.DecodeRuneInString(unescapeVal[i:]) if r == utf8.RuneError && size == 1 { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 2acbac35466..49a35b12255 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 822d8479474..691d96c7554 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. @@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. @@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. 
@@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index ac65262c656..8982aa0dc56 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -145,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false and mean a tracer will return +// a nonRecordingSpan by default. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) + } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 273d58e0014..9c0b720a4d6 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these +// option is provided to a Span's start event. Otherwise, these // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). 
 //
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
new file mode 100644
index 00000000000..cd382c82a1a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
@@ -0,0 +1,3 @@
+# Trace Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop)
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
new file mode 100644
index 00000000000..64a4f1b362f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry trace API that
+// produces no telemetry and minimizes the computation resources used.
+//
+// Using this package to implement the OpenTelemetry trace API will effectively
+// disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry trace API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ trace.TracerProvider = TracerProvider{}
+	_ trace.Tracer         = Tracer{}
+	_ trace.Span           = Span{}
+)
+
+// TracerProvider is an OpenTelemetry No-Op TracerProvider.
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+	return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+	return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+	span := trace.SpanFromContext(ctx)
+
+	// If the parent context contains a non-zero span context, that span
+	// context needs to be returned as a non-recording span
+	// (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+	var zeroSC trace.SpanContext
+	if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+		if !span.IsRecording() {
+			// If the span is not recording return it directly.
+			return ctx, span
+		}
+		// Otherwise, return the span context in a new non-recording span.
+		span = Span{sc: sc}
+	} else {
+		// No parent, return a No-Op span with an empty span context.
+		span = noopSpanInstance
+	}
+	return trace.ContextWithSpan(ctx, span), span
+}
+
+var noopSpanInstance trace.Span = Span{}
+
+// Span is an OpenTelemetry No-Op Span.
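+//
+// It may carry the span context of a recording parent so that context
+// propagation keeps working even though nothing is recorded.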
+type Span struct { + embedded.Span + + sc trace.SpanContext +} + +// SpanContext returns an empty span context. +func (s Span) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (Span) IsRecording() bool { return false } + +// SetStatus does nothing. +func (Span) SetStatus(codes.Code, string) {} + +// SetAttributes does nothing. +func (Span) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (Span) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (Span) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (Span) AddEvent(string, ...trace.EventOption) {} + +// AddLink does nothing. +func (Span) AddLink(trace.Link) {} + +// SetName does nothing. +func (Span) SetName(string) {} + +// TracerProvider returns a No-Op TracerProvider. +func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 59e24816137..fb7d12673eb 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.32.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index c04b12f6b74..9f878cd1fe7 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.32.0 + version: v1.33.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -23,11 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.54.0 + version: v0.55.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.8.0 + version: v0.9.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -35,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.11 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 04a10f51c97..6818b2de304 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -200,7 +200,17 @@ var WithDataWrapper = MarshalStyle(true) // WithoutDataWrapper marshals JSON without a {"data": ...} wrapper. var WithoutDataWrapper = MarshalStyle(false) +// JSONReader is like JSONBuffer, but returns an io.Reader instead. func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) { + buf, err := wrap.JSONBuffer(v) + if err != nil { + return nil, err + } + return buf, nil +} + +// JSONBuffer encodes the body and wraps it if needed. 
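+//
+// Returning the concrete *bytes.Buffer lets generated callers reuse the
+// encoded bytes (e.g. for request logging) without re-reading the body.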
+func (wrap MarshalStyle) JSONBuffer(v interface{}) (*bytes.Buffer, error) { buf := new(bytes.Buffer) if wrap { buf.Write([]byte(`{"data": `)) diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 9c2d660815e..85ba75d08f5 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -57,11 +57,13 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strconv" "strings" + "github.com/googleapis/gax-go/v2/internallog" googleapi "google.golang.org/api/googleapi" internal "google.golang.org/api/internal" gensupport "google.golang.org/api/internal/gensupport" @@ -85,6 +87,7 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint var _ = internal.Version +var _ = internallog.New const apiId = "iamcredentials:v1" const apiName = "iamcredentials" @@ -115,7 +118,8 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err if err != nil { return nil, err } - s, err := New(client) + s := &Service{client: client, BasePath: basePath, logger: internaloption.GetLogger(opts)} + s.Projects = NewProjectsService(s) if err != nil { return nil, err } @@ -134,13 +138,12 @@ func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } - s := &Service{client: client, BasePath: basePath} - s.Projects = NewProjectsService(s) - return s, nil + return NewService(context.Background(), option.WithHTTPClient(client)) } type Service struct { client *http.Client + logger *slog.Logger BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment @@ -524,8 +527,7 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.generateaccesstokenrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.generateaccesstokenrequest) if err != nil { return nil, err } @@ -541,6 +543,7 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) ( googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateAccessToken", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -576,9 +579,11 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Do(opts ...googleapi.Ca }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateAccessToken", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -629,8 +634,7 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header { func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, 
err := googleapi.WithoutDataWrapper.JSONReader(c.generateidtokenrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.generateidtokenrequest) if err != nil { return nil, err } @@ -646,6 +650,7 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*htt googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateIdToken", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -681,9 +686,11 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Do(opts ...googleapi.CallOp }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.generateIdToken", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -742,12 +749,11 @@ func (c *ProjectsServiceAccountsGetAllowedLocationsCall) doRequest(alt string) ( if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1/{+name}/allowedLocations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -755,6 +761,7 @@ func (c *ProjectsServiceAccountsGetAllowedLocationsCall) doRequest(alt string) ( googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.getAllowedLocations", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -790,9 +797,11 @@ func (c *ProjectsServiceAccountsGetAllowedLocationsCall) Do(opts ...googleapi.Ca }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.getAllowedLocations", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -843,8 +852,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header { func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signblobrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.signblobrequest) if err != nil { return nil, err } @@ -860,6 +868,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Respo googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signBlob", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -895,9 +904,11 @@ func (c *ProjectsServiceAccountsSignBlobCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := 
gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signBlob", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -948,8 +959,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header { func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.signjwtrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.signjwtrequest) if err != nil { return nil, err } @@ -965,6 +975,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Respon googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signJwt", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -1000,8 +1011,10 @@ func (c *ProjectsServiceAccountsSignJwtCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "iamcredentials.projects.serviceAccounts.signJwt", "response", internallog.HTTPResponse(res, b)) return ret, nil } diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 4ebeb61c1a2..86861e24383 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -15,6 +15,7 @@ import ( "os" "time" + "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/oauth2adapt" "golang.org/x/oauth2" @@ -30,7 +31,7 @@ const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" // it returns default credential information. func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { if ds.IsNewAuthLibraryEnabled() { - return credsNewAuth(ctx, ds) + return credsNewAuth(ds) } creds, err := baseCreds(ctx, ds) if err != nil { @@ -42,6 +43,30 @@ func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { return creds, nil } +// AuthCreds returns [cloud.google.com/go/auth.Credentials] based on credentials +// options provided via [option.ClientOption], including legacy oauth2/google +// options. If there are no applicable options, then it returns the result of +// [cloud.google.com/go/auth/credentials.DetectDefault]. 
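The implementation of this helper follows in the next hunk. As a minimal sketch of the precedence it implements, assuming only a hypothetical oauth2.TokenSource named ts (everything else is taken from the diff; the snippet runs inside package internal):

	ctx := context.Background()
	// Only TokenSource is set, so AuthCreds wraps it in a
	// google.Credentials and adapts it via oauth2adapt. With a fully
	// empty DialSettings it would fall through to DetectDefault.
	ds := &DialSettings{TokenSource: ts}
	creds, err := AuthCreds(ctx, ds)
	if err != nil {
		// handle error
	}
	_ = creds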
+func AuthCreds(ctx context.Context, settings *DialSettings) (*auth.Credentials, error) { + if settings.AuthCredentials != nil { + return settings.AuthCredentials, nil + } + // Support oauth2/google options + var oauth2Creds *google.Credentials + if settings.InternalCredentials != nil { + oauth2Creds = settings.InternalCredentials + } else if settings.Credentials != nil { + oauth2Creds = settings.Credentials + } else if settings.TokenSource != nil { + oauth2Creds = &google.Credentials{TokenSource: settings.TokenSource} + } + if oauth2Creds != nil { + return oauth2adapt.AuthCredentialsFromOauth2Credentials(oauth2Creds), nil + } + + return detectDefaultFromDialSettings(settings) +} + // GetOAuth2Configuration determines configurations for the OAuth2 transport, which is separate from the API transport. // The OAuth2 transport and endpoint will be configured for mTLS if applicable. func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string, *http.Client, error) { @@ -62,7 +87,7 @@ func GetOAuth2Configuration(ctx context.Context, settings *DialSettings) (string return tokenURL, oauth2Client, nil } -func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credentials, error) { +func credsNewAuth(settings *DialSettings) (*google.Credentials, error) { // Preserve old options behavior if settings.InternalCredentials != nil { return settings.InternalCredentials, nil @@ -76,6 +101,14 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti return oauth2adapt.Oauth2CredentialsFromAuthCredentials(settings.AuthCredentials), nil } + creds, err := detectDefaultFromDialSettings(settings) + if err != nil { + return nil, err + } + return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil +} + +func detectDefaultFromDialSettings(settings *DialSettings) (*auth.Credentials, error) { var useSelfSignedJWT bool var aud string var scopes []string @@ -100,18 +133,14 @@ func credsNewAuth(ctx context.Context, settings *DialSettings) (*google.Credenti aud = settings.DefaultAudience } - creds, err := credentials.DetectDefault(&credentials.DetectOptions{ + return credentials.DetectDefault(&credentials.DetectOptions{ Scopes: scopes, Audience: aud, CredentialsFile: settings.CredentialsFile, CredentialsJSON: settings.CredentialsJSON, UseSelfSignedJWT: useSelfSignedJWT, + Logger: settings.Logger, }) - if err != nil { - return nil, err - } - - return oauth2adapt.Oauth2CredentialsFromAuthCredentials(creds), nil } func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index 0861d4d3c87..8c7435de3e6 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -200,6 +200,9 @@ func (mi *MediaInfo) UploadType() string { // UploadRequest sets up an HTTP request for media upload. It adds headers // as necessary, and returns a replacement for the body and a function for http.Request.GetBody. 
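The media.go hunk that follows adds a nil-body guard to UploadRequest. A hedged sketch of the resulting behavior, using the internal gensupport API as shown in this diff; the nil *MediaInfo stands in for a metadata-only call and is the only assumption here:

	var mi *MediaInfo // nil: no media attached to this request
	// After this patch, a nil body is replaced with an empty
	// *bytes.Buffer, so the caller gets a usable reader back instead
	// of nil.
	newBody, getBody, cleanup := mi.UploadRequest(http.Header{}, nil)
	defer cleanup()
	_ = newBody // empty *bytes.Buffer rather than nil
	_ = getBody // nil: no media means no GetBody replay function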
func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) { + if body == nil { + body = new(bytes.Buffer) + } cleanup = func() {} if mi == nil { return body, nil, cleanup diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 9e3bcf15963..a87fd3e727b 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -266,7 +266,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // The upload should be retried if the rCtx is canceled due to a timeout. select { case <-rCtx.Done(): - if errors.Is(rCtx.Err(), context.DeadlineExceeded) { + if rx.ChunkTransferTimeout != 0 && errors.Is(rCtx.Err(), context.DeadlineExceeded) { // Cancel the context for rCtx cancel() continue diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index f6716134ebf..1c91f147abe 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "strings" "time" @@ -222,3 +223,19 @@ func DecodeResponse(target interface{}, res *http.Response) error { } return json.NewDecoder(res.Body).Decode(target) } + +// DecodeResponseBytes decodes the body of res into target and returns bytes read +// from the body. If there is no body, target is unchanged. +func DecodeResponseBytes(target interface{}, res *http.Response) ([]byte, error) { + if res.StatusCode == http.StatusNoContent { + return nil, nil + } + b, err := io.ReadAll(res.Body) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, target); err != nil { + return nil, err + } + return b, nil +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 4839ab6d4fa..4f5b1a0ebea 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -72,6 +72,9 @@ type DialSettings struct { // New Auth library Options AuthCredentials *auth.Credentials EnableNewAuthLibrary bool + + // TODO(b/372244283): Remove after b/358175516 has been fixed + EnableAsyncRefreshDryRun func() } // GetScopes returns the user-provided scopes, if set, or else falls back to the diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index a9925b599c3..532de375dd4 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
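The DecodeResponseBytes helper added in the send.go hunk above returns the decoded target together with the raw response body, which feeds the new slog debug records. A sketch of the Do-method pattern that the storage-gen.go hunks below repeat many times (Bucket and the rpcName are placeholders lifted from one such method):

	var ret Bucket
	target := &ret
	// Decode into target and keep the raw bytes for logging; a 204
	// response leaves target unchanged and returns nil bytes.
	b, err := gensupport.DecodeResponseBytes(target, res)
	if err != nil {
		return nil, err
	}
	c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName,
		"rpcName", "storage.buckets.get", "response", internallog.HTTPResponse(res, b))
	return &ret, nil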
-const Version = "0.209.0" +const Version = "0.213.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 482d565fec9..c63c0c194ae 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -6,8 +6,10 @@ package internaloption import ( + "context" "log/slog" + "cloud.google.com/go/auth" "github.com/googleapis/gax-go/v2/internallog" "golang.org/x/oauth2/google" "google.golang.org/api/internal" @@ -209,6 +211,33 @@ func (w enableNewAuthLibrary) Apply(o *internal.DialSettings) { o.EnableNewAuthLibrary = bool(w) } +// EnableAsyncRefreshDryRun returns a ClientOption that specifies if libraries in this +// module should asynchronously refresh auth token in parallel to sync refresh. +// +// This option can be used to determine whether refreshing the token asymnchronously +// prior to its actual expiry works without any issues in a particular environment. +// +// errHandler function will be called when there is an error while refreshing +// the token asynchronously. +// +// This is an EXPERIMENTAL option and will be removed in the future. +// TODO(b/372244283): Remove after b/358175516 has been fixed +func EnableAsyncRefreshDryRun(errHandler func()) option.ClientOption { + return enableAsyncRefreshDryRun{ + errHandler: errHandler, + } +} + +// TODO(b/372244283): Remove after b/358175516 has been fixed +type enableAsyncRefreshDryRun struct { + errHandler func() +} + +// TODO(b/372244283): Remove after b/358175516 has been fixed +func (w enableAsyncRefreshDryRun) Apply(o *internal.DialSettings) { + o.EnableAsyncRefreshDryRun = w.errHandler +} + // EmbeddableAdapter is a no-op option.ClientOption that allow libraries to // create their own client options by embedding this type into their own // client-specific option wrapper. See example for usage. @@ -228,3 +257,33 @@ func GetLogger(opts []option.ClientOption) *slog.Logger { } return internallog.New(ds.Logger) } + +// AuthCreds returns [cloud.google.com/go/auth.Credentials] using the following +// options provided via [option.ClientOption], including legacy oauth2/google +// options, in this order: +// +// * [option.WithAuthCredentials] +// * [option/internaloption.WithCredentials] (internal use only) +// * [option.WithCredentials] +// * [option.WithTokenSource] +// +// If there are no applicable credentials options, then it passes the +// following options to [cloud.google.com/go/auth/credentials.DetectDefault] and +// returns the result: +// +// * [option.WithAudiences] +// * [option.WithCredentialsFile] +// * [option.WithCredentialsJSON] +// * [option.WithScopes] +// * [option/internaloption.WithDefaultScopes] (internal use only) +// * [option/internaloption.EnableJwtWithScope] (internal use only) +// +// This function should only be used internally by generated clients. This is an +// EXPERIMENTAL API and may be changed or removed in the future. 
+func AuthCreds(ctx context.Context, opts []option.ClientOption) (*auth.Credentials, error) { + var ds internal.DialSettings + for _, opt := range opts { + opt.Apply(&ds) + } + return internal.AuthCreds(ctx, &ds) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 96599143e6a..992c4c0145a 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -93,7 +93,7 @@ "location": "us-west4" } ], - "etag": "\"3134393437363236373436353839383934323639\"", + "etag": "\"3133343838373034343130353038353234313337\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -3224,6 +3224,98 @@ ], "supportsSubscription": true }, + "move": { + "description": "Moves the source object to the destination object in the same bucket.", + "httpMethod": "POST", + "id": "storage.objects.move", + "parameterOrder": [ + "bucket", + "sourceObject", + "destinationObject" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationObject": { + "description": "Name of the destination object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object. `ifGenerationMatch` and `ifGenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.`ifGenerationMatch` and `ifGenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value. `ifMetagenerationMatch` and `ifMetagenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value. `ifMetagenerationMatch` and `ifMetagenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current generation matches the given value. 
`ifSourceGenerationMatch` and `ifSourceGenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value. `ifSourceGenerationMatch` and `ifSourceGenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value. `ifSourceMetagenerationMatch` and `ifSourceMetagenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value. `ifSourceMetagenerationMatch` and `ifSourceMetagenerationNotMatch` conditions are mutually exclusive: it's an error for both of them to be set in the request.", + "format": "int64", + "location": "query", + "type": "string" + }, + "sourceObject": { + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{sourceObject}/moveTo/o/{destinationObject}", + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "patch": { "description": "Patches an object's metadata.", "httpMethod": "PATCH", @@ -4272,7 +4364,7 @@ } } }, - "revision": "20241113", + "revision": "20241206", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AdvanceRelocateBucketOperationRequest": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 8cb6c0771d5..474fbb49846 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -64,12 +64,14 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "net/url" "strconv" "strings" "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/internallog" googleapi "google.golang.org/api/googleapi" internal "google.golang.org/api/internal" gensupport "google.golang.org/api/internal/gensupport" @@ -93,6 +95,7 @@ var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint var _ = internal.Version +var _ = internallog.New var _ = gax.Version const apiId = "storage:v1" @@ -139,7 +142,19 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err if err != nil { return nil, err } - s, err := New(client) + s := &Service{client: client, BasePath: basePath, logger: internaloption.GetLogger(opts)} + s.AnywhereCaches = NewAnywhereCachesService(s) + s.BucketAccessControls = NewBucketAccessControlsService(s) + s.Buckets = NewBucketsService(s) + s.Channels = NewChannelsService(s) + s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.Folders = NewFoldersService(s) + s.ManagedFolders = NewManagedFoldersService(s) + s.Notifications = NewNotificationsService(s) + s.ObjectAccessControls = NewObjectAccessControlsService(s) + s.Objects = NewObjectsService(s) + s.Operations = NewOperationsService(s) + s.Projects = NewProjectsService(s) if err != nil { return nil, err } @@ -158,24 +173,12 @@ func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } - s := &Service{client: client, BasePath: basePath} - s.AnywhereCaches = NewAnywhereCachesService(s) - s.BucketAccessControls = NewBucketAccessControlsService(s) - s.Buckets = NewBucketsService(s) - s.Channels = NewChannelsService(s) - s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) - s.Folders = NewFoldersService(s) - s.ManagedFolders = NewManagedFoldersService(s) - s.Notifications = NewNotificationsService(s) - s.ObjectAccessControls = NewObjectAccessControlsService(s) - s.Objects = NewObjectsService(s) - s.Operations = NewOperationsService(s) - s.Projects = NewProjectsService(s) - return s, nil + return NewService(context.Background(), option.WithHTTPClient(client)) } type Service struct { client *http.Client + logger *slog.Logger BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment @@ -2892,12 +2895,11 @@ func (c *AnywhereCachesDisableCall) Header() http.Header { func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", 
c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -2906,6 +2908,7 @@ func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.disable", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -2940,9 +2943,11 @@ func (c *AnywhereCachesDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereC }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.disable", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3003,12 +3008,11 @@ func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -3017,6 +3021,7 @@ func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3051,9 +3056,11 @@ func (c *AnywhereCachesGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3101,8 +3108,7 @@ func (c *AnywhereCachesInsertCall) Header() http.Header { func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.anywherecache) if err != nil { return nil, err } @@ -3118,6 +3124,7 @@ func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3153,9 +3160,11 @@ func (c *AnywhereCachesInsertCall) Do(opts ...googleapi.CallOption) 
(*GoogleLong }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3228,12 +3237,11 @@ func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -3241,6 +3249,7 @@ func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3275,9 +3284,11 @@ func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCach }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3347,12 +3358,11 @@ func (c *AnywhereCachesPauseCall) Header() http.Header { func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -3361,6 +3371,7 @@ func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.pause", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3395,9 +3406,11 @@ func (c *AnywhereCachesPauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCac }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.pause", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3446,12 +3459,11 @@ func (c *AnywhereCachesResumeCall) Header() http.Header { func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -3460,6 +3472,7 @@ func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.resume", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3494,9 +3507,11 @@ func (c *AnywhereCachesResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCa }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.resume", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3548,8 +3563,7 @@ func (c *AnywhereCachesUpdateCall) Header() http.Header { func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.anywherecache) if err != nil { return nil, err } @@ -3566,6 +3580,7 @@ func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "anywhereCacheId": c.anywhereCacheId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.anywhereCaches.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3601,9 +3616,11 @@ func (c *AnywhereCachesUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLong }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil 
{ return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.anywhereCaches.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3662,12 +3679,11 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -3676,6 +3692,7 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3690,6 +3707,7 @@ func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -3759,12 +3777,11 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -3773,6 +3790,7 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3808,9 +3826,11 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3865,8 +3885,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol) if err != nil { return nil, err } @@ -3882,6 +3901,7 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -3917,9 +3937,11 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -3984,12 +4006,11 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -3997,6 +4018,7 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4032,9 +4054,11 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4094,8 +4118,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol) if err != nil { return nil, err } @@ -4112,6 +4135,7 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4147,9 +4171,11 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.patch", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4209,8 +4235,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucketaccesscontrol) if err != nil { return nil, err } @@ -4227,6 +4252,7 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.bucketAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4262,9 +4288,11 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, 
"rpcName", "storage.bucketAccessControls.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4333,12 +4361,11 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -4346,6 +4373,7 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4360,6 +4388,7 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -4469,12 +4498,11 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -4482,6 +4510,7 @@ func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4516,9 +4545,11 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4592,12 +4623,11 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -4605,6 +4635,7 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.getIamPolicy", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4639,9 +4670,11 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.getIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4708,12 +4741,11 @@ func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, err if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/storageLayout") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -4721,6 +4753,7 @@ func (c *BucketsGetStorageLayoutCall) doRequest(alt string) (*http.Response, err googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.getStorageLayout", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4756,9 +4789,11 @@ func (c *BucketsGetStorageLayoutCall) Do(opts ...googleapi.CallOption) (*BucketS }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.getStorageLayout", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -4889,8 +4924,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket) if err != nil { return nil, err } @@ -4903,6 +4937,7 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { return nil, err } req.Header = reqHeaders + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -4937,9 +4972,11 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", 
"storage.buckets.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5046,16 +5083,16 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } req.Header = reqHeaders + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5090,9 +5127,11 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5169,12 +5208,11 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -5182,6 +5220,7 @@ func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.lockRetentionPolicy", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5216,9 +5255,11 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.lockRetentionPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5360,8 +5401,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket2) if err != nil { return nil, err } @@ -5377,6 +5417,7 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5411,9 
+5452,11 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.patch", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5462,8 +5505,7 @@ func (c *BucketsRelocateCall) Header() http.Header { func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.relocatebucketrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.relocatebucketrequest) if err != nil { return nil, err } @@ -5479,6 +5521,7 @@ func (c *BucketsRelocateCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.relocate", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5514,9 +5557,11 @@ func (c *BucketsRelocateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunni }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.relocate", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5583,12 +5628,11 @@ func (c *BucketsRestoreCall) Header() http.Header { func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/restore") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -5596,6 +5640,7 @@ func (c *BucketsRestoreCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.restore", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5630,9 +5675,11 @@ func (c *BucketsRestoreCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.restore", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5687,8 +5734,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) if err != nil { return nil, err } @@ -5704,6 +5750,7 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5738,9 +5785,11 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.setIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -5808,12 +5857,11 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -5821,6 +5869,7 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -5856,9 +5905,11 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.testIamPermissions", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6000,8 +6051,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bucket2) if err != nil { return nil, err } @@ -6017,6 +6067,7 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6051,9 +6102,11 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6097,8 +6150,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.channel) if err != nil { return nil, err } @@ -6111,6 +6163,7 @@ func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { return nil, err } req.Header = reqHeaders + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.channels.stop", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6125,6 +6178,7 @@ func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.channels.stop", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -6183,12 +6237,11 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c 
*DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -6197,6 +6250,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6211,6 +6265,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -6281,12 +6336,11 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -6295,6 +6349,7 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6330,9 +6385,11 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6387,8 +6444,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -6404,6 +6460,7 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, 
req) } @@ -6439,9 +6496,11 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6522,12 +6581,11 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -6535,6 +6593,7 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6570,9 +6629,11 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6632,8 +6693,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -6650,6 +6710,7 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6685,9 +6746,11 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.patch", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6747,8 +6810,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := 
googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -6765,6 +6827,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Res "bucket": c.bucket, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6800,9 +6863,11 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.defaultObjectAccessControls.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -6867,12 +6932,11 @@ func (c *FoldersDeleteCall) Header() http.Header { func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -6881,6 +6945,7 @@ func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "folder": c.folder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -6895,6 +6960,7 @@ func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -6973,12 +7039,11 @@ func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}") urls += "?" 
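The pattern repeated in every mutating call above is the switch from googleapi.WithoutDataWrapper.JSONReader to JSONBuffer: the payload is marshalled once into a buffer, which serves as the request body while its Bytes() are handed to internallog.HTTPRequest for the new debug log line. A minimal sketch of the idea using only the standard library; marshalBody is a hypothetical stand-in, not the real helper:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// marshalBody mirrors the JSONReader -> JSONBuffer change: instead of a
// one-shot io.Reader, return a *bytes.Buffer whose contents stay
// inspectable via Bytes() after the request has been built.
func marshalBody(v any) (*bytes.Buffer, error) {
	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(v); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	body, err := marshalBody(map[string]string{"entity": "allUsers", "role": "READER"})
	if err != nil {
		panic(err)
	}
	req, err := http.NewRequest("POST", "https://example.invalid/acl", body)
	if err != nil {
		panic(err)
	}
	// Nothing has read the buffer yet, so the same bytes that will go on
	// the wire can still be logged, which is what the generated code does
	// just before calling gensupport.SendRequest.
	fmt.Printf("%s %s body=%s", req.Method, req.URL, body.Bytes())
}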
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -6987,6 +7052,7 @@ func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "folder": c.folder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7021,9 +7087,11 @@ func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7079,8 +7147,7 @@ func (c *FoldersInsertCall) Header() http.Header { func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.folder) if err != nil { return nil, err } @@ -7096,6 +7163,7 @@ func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7130,9 +7198,11 @@ func (c *FoldersInsertCall) Do(opts ...googleapi.CallOption) (*Folder, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7240,12 +7310,11 @@ func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -7253,6 +7322,7 @@ func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7287,9 +7357,11 @@ func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7379,12 +7451,11 @@ func (c *FoldersRenameCall) Header() http.Header { func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -7394,6 +7465,7 @@ func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) { "sourceFolder": c.sourceFolder, "destinationFolder": c.destinationFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.folders.rename", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7429,9 +7501,11 @@ func (c *FoldersRenameCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.folders.rename", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7505,12 +7579,11 @@ func (c *ManagedFoldersDeleteCall) Header() http.Header { func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" 
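On the response side, every Do method swaps gensupport.DecodeResponse for gensupport.DecodeResponseBytes, which decodes into the target and also returns the raw body so it can be attached to the "api response" log entry. The helper below is a sketch of what such a function must do, assuming nothing beyond the signature visible in the diff:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// decodeResponseBytes decodes a JSON response into target and returns the
// raw bytes as well, so a caller can use the typed result and still log
// the payload. This mirrors the DecodeResponse -> DecodeResponseBytes
// change in the generated code.
func decodeResponseBytes(target any, res *http.Response) ([]byte, error) {
	b, err := io.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(b, target); err != nil {
		return nil, err
	}
	return b, nil
}

func main() {
	res := &http.Response{Body: io.NopCloser(strings.NewReader(`{"kind":"storage#object"}`))}
	var out struct {
		Kind string `json:"kind"`
	}
	b, err := decodeResponseBytes(&out, res)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Kind, len(b)) // typed value plus raw bytes for logging
}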
+ c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -7519,6 +7592,7 @@ func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7533,6 +7607,7 @@ func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -7610,12 +7685,11 @@ func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -7624,6 +7698,7 @@ func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7658,9 +7733,11 @@ func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7737,12 +7814,11 @@ func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -7751,6 +7827,7 @@ func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.getIamPolicy", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7785,9 +7862,11 @@ func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.getIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7835,8 +7914,7 @@ func (c *ManagedFoldersInsertCall) Header() http.Header { func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedfolder) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.managedfolder) if err != nil { return nil, err } @@ -7852,6 +7930,7 @@ func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -7886,9 +7965,11 @@ func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFol }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -7967,12 +8048,11 @@ func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -7980,6 +8060,7 @@ func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8014,9 +8095,11 @@ func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolde }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8095,8 +8178,7 @@ func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header { func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) if err != nil { return nil, err } @@ -8113,6 +8195,7 @@ func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8147,9 +8230,11 @@ func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Poli }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.setIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8220,12 +8305,11 @@ func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Resp if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions") urls += "?" 
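The added DebugContext calls only produce output when the client is built with a debug-level logger; otherwise c.s.logger discards them. A sketch of opting in, assuming the option.WithLogger client option that shipped alongside this logging support (the bucket name is a placeholder):

package main

import (
	"context"
	"log/slog"
	"os"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// The generated "api request" / "api response" entries are emitted at
	// debug level, so the handler must not filter them out.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))
	svc, err := storage.NewService(ctx, option.WithLogger(logger))
	if err != nil {
		panic(err)
	}
	// Every call now logs the outgoing request and the decoded response,
	// keyed by serviceName and rpcName as in the hunks above.
	if _, err := svc.ManagedFolders.List("my-bucket").Context(ctx).Do(); err != nil {
		panic(err)
	}
}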
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -8234,6 +8318,7 @@ func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Resp "bucket": c.bucket, "managedFolder": c.managedFolder, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.managedFolders.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8269,9 +8354,11 @@ func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.managedFolders.testIamPermissions", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8327,12 +8414,11 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -8341,6 +8427,7 @@ func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "notification": c.notification, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8355,6 +8442,7 @@ func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -8422,12 +8510,11 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -8436,6 +8523,7 @@ func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "notification": c.notification, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8470,9 +8558,11 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8527,8 +8617,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.notification) if err != nil { return nil, err } @@ -8544,6 +8633,7 @@ func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8578,9 +8668,11 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8645,12 +8737,11 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -8658,6 +8749,7 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.notifications.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8692,9 +8784,11 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.notifications.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8766,12 +8860,11 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -8781,6 +8874,7 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, "object": c.object, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8795,6 +8889,7 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -8877,12 +8972,11 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -8892,6 +8986,7 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err "object": c.object, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -8927,9 +9022,11 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -8997,8 +9094,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -9015,6 +9111,7 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.insert", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9050,9 +9147,11 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9130,12 +9229,11 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -9144,6 +9242,7 @@ func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, er "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9179,9 +9278,11 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9254,8 +9355,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -9273,6 +9373,7 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e "object": c.object, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9308,9 +9409,11 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objectAccessControls.patch", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9383,8 +9486,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.objectaccesscontrol) if err != nil { return nil, err } @@ -9402,6 +9504,7 @@ func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, "object": c.object, "entity": c.entity, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objectAccessControls.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9437,9 +9540,11 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", 
"storage.objectAccessControls.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9488,8 +9593,7 @@ func (c *ObjectsBulkRestoreCall) Header() http.Header { func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkrestoreobjectsrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.bulkrestoreobjectsrequest) if err != nil { return nil, err } @@ -9505,6 +9609,7 @@ func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.bulkRestore", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9540,9 +9645,11 @@ func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongru }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.bulkRestore", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9662,8 +9769,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.composerequest) if err != nil { return nil, err } @@ -9680,6 +9786,7 @@ func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.compose", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -9714,9 +9821,11 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.compose", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -9918,8 +10027,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) if err != nil { return nil, err } @@ -9938,6 +10046,7 @@ func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.copy", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, 
c.s.client, req) } @@ -9972,9 +10081,11 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.copy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -10077,12 +10188,11 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -10091,6 +10201,7 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10105,6 +10216,7 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -10249,12 +10361,11 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -10263,6 +10374,7 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10313,9 +10425,11 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -10393,12 +10507,11 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -10407,6 +10520,7 @@ func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.getIamPolicy", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10441,9 +10555,11 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.getIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -10675,8 +10791,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) if err != nil { return nil, err } @@ -10687,14 +10802,10 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) } - if body == nil { - body = new(bytes.Buffer) - reqHeaders.Set("Content-Type", "application/json") - } - body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + newBody, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) defer cleanup() urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, newBody) if err != nil { return nil, err } @@ -10703,6 +10814,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.insert", "request", internallog.HTTPRequest(req, body.Bytes())) if c.retry != nil { return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) } @@ -10758,9 +10870,11 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.insert", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -10931,12 +11045,11 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") urls += "?" 
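ObjectsInsertCall.doRequest is the one place where the rewrite is more than mechanical: JSONBuffer never returns a nil reader, so the old fallback that substituted an empty bytes.Buffer is deleted, and the media-upload wrapper now assigns to a separate newBody variable so the original JSON buffer remains available for the log line. A sketch of why the two must stay distinct; uploadWrap is a hypothetical stand-in for mediaInfo_.UploadRequest, which can replace the body with a combined metadata-plus-media stream:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// uploadWrap stands in for mediaInfo_.UploadRequest: it may return a
// different reader than the JSON body it was given, for example a
// multipart stream that interleaves metadata and media content.
func uploadWrap(jsonBody *bytes.Buffer) io.Reader {
	return io.MultiReader(
		strings.NewReader("--boundary\r\n"),
		jsonBody,
		strings.NewReader("\r\n--boundary--\r\n"),
	)
}

func main() {
	body := bytes.NewBufferString(`{"name":"object.txt"}`)
	newBody := uploadWrap(body) // what actually goes on the wire
	// Log from the original buffer before anything reads newBody:
	// bytes.Buffer.Bytes() returns only the unread portion, and newBody
	// cannot be rewound once consumed.
	fmt.Printf("request json=%s\n", body.Bytes())
	wire, err := io.ReadAll(newBody)
	if err != nil {
		panic(err)
	}
	fmt.Printf("wire payload=%s\n", wire)
}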
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -10944,6 +11057,7 @@ func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -10978,9 +11092,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -11005,6 +11121,207 @@ func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) err } } +type ObjectsMoveCall struct { + s *Service + bucket string + sourceObject string + destinationObject string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Move: Moves the source object to the destination object in the same bucket. +// +// - bucket: Name of the bucket in which the object resides. +// - destinationObject: Name of the destination object. For information about +// how to URL encode object names to be path safe, see Encoding URI Path +// Parts (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - sourceObject: Name of the source object. For information about how to URL +// encode object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Move(bucket string, sourceObject string, destinationObject string) *ObjectsMoveCall { + c := &ObjectsMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.sourceObject = sourceObject + c.destinationObject = destinationObject + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": Makes the +// operation conditional on whether the destination object's current generation +// matches the given value. Setting to 0 makes the operation succeed only if +// there are no live versions of the object. `ifGenerationMatch` and +// `ifGenerationNotMatch` conditions are mutually exclusive: it's an error for +// both of them to be set in the request. +func (c *ObjectsMoveCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter "ifGenerationNotMatch": +// Makes the operation conditional on whether the destination object's current +// generation does not match the given value. If no live object exists, the +// precondition fails. Setting to 0 makes the operation succeed only if there +// is a live version of the object. `ifGenerationMatch` and +// `ifGenerationNotMatch` conditions are mutually exclusive: it's an error for +// both of them to be set in the request.
+func (c *ObjectsMoveCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter "ifMetagenerationMatch": +// Makes the operation conditional on whether the destination object's current +// metageneration matches the given value. `ifMetagenerationMatch` and +// `ifMetagenerationNotMatch` conditions are mutually exclusive: it's an error +// for both of them to be set in the request. +func (c *ObjectsMoveCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on whether the +// destination object's current metageneration does not match the given value. +// `ifMetagenerationMatch` and `ifMetagenerationNotMatch` conditions are +// mutually exclusive: it's an error for both of them to be set in the request. +func (c *ObjectsMoveCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether the +// source object's current generation matches the given value. +// `ifSourceGenerationMatch` and `ifSourceGenerationNotMatch` conditions are +// mutually exclusive: it's an error for both of them to be set in the request. +func (c *ObjectsMoveCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on whether the +// source object's current generation does not match the given value. +// `ifSourceGenerationMatch` and `ifSourceGenerationNotMatch` conditions are +// mutually exclusive: it's an error for both of them to be set in the request. +func (c *ObjectsMoveCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on whether +// the source object's current metageneration matches the given value. +// `ifSourceMetagenerationMatch` and `ifSourceMetagenerationNotMatch` +// conditions are mutually exclusive: it's an error for both of them to be set +// in the request. +func (c *ObjectsMoveCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on whether +// the source object's current metageneration does not match the given value. +// `ifSourceMetagenerationMatch` and `ifSourceMetagenerationNotMatch` +// conditions are mutually exclusive: it's an error for both of them to be set +// in the request. 
+func (c *ObjectsMoveCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsMoveCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to be +// billed for this request. Required for Requester Pays buckets. +func (c *ObjectsMoveCall) UserProject(userProject string) *ObjectsMoveCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse for more +// details. +func (c *ObjectsMoveCall) Fields(s ...googleapi.Field) *ObjectsMoveCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. +func (c *ObjectsMoveCall) Context(ctx context.Context) *ObjectsMoveCall { + c.ctx_ = ctx + return c +} + +// Header returns a http.Header that can be modified by the caller to add +// headers to the request. +func (c *ObjectsMoveCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsMoveCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{sourceObject}/moveTo/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, nil) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "sourceObject": c.sourceObject, + "destinationObject": c.destinationObject, + }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.move", "request", internallog.HTTPRequest(req, nil)) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.move" call. +// Any non-2xx status code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was returned. +func (c *ObjectsMoveCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { + return nil, err + } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.move", "response", internallog.HTTPResponse(res, b)) + return ret, nil +} + type ObjectsPatchCall struct { s *Service bucket string @@ -11155,8 +11472,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object2) if err != nil { return nil, err } @@ -11173,6 +11489,7 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.patch", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11207,9 +11524,11 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.patch", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -11334,12 +11653,11 @@ func (c *ObjectsRestoreCall) Header() http.Header { func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") urls += "?" 
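Besides the logging changes, the update adds one genuinely new API surface: storage.objects.move, a server-side move within a single bucket that POSTs to b/{bucket}/o/{sourceObject}/moveTo/o/{destinationObject}. A usage sketch built only from the builder methods defined above; the bucket and object names are placeholders:

package main

import (
	"context"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		panic(err)
	}
	// Move src.txt to dst.txt in the same bucket. Per the parameter docs
	// above, IfGenerationMatch(0) makes the move succeed only if no live
	// version of the destination object already exists.
	obj, err := svc.Objects.Move("my-bucket", "src.txt", "dst.txt").
		IfGenerationMatch(0).
		Context(ctx).
		Do()
	if err != nil {
		panic(err)
	}
	fmt.Println("moved; new generation:", obj.Generation)
}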
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -11348,6 +11666,7 @@ func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.restore", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11382,9 +11701,11 @@ func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.restore", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -11608,8 +11929,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object) if err != nil { return nil, err } @@ -11628,6 +11948,7 @@ func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.rewrite", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11663,9 +11984,11 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.rewrite", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -11733,8 +12056,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.policy) if err != nil { return nil, err } @@ -11751,6 +12073,7 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.setIamPolicy", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11785,9 +12108,11 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.setIamPolicy", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ 
-11868,12 +12193,11 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -11882,6 +12206,7 @@ func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.testIamPermissions", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -11917,9 +12242,11 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.testIamPermissions", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12073,8 +12400,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.object2) if err != nil { return nil, err } @@ -12091,6 +12417,7 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "object": c.object, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12125,9 +12452,11 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12263,8 +12592,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.channel) if err != nil { return nil, err } @@ -12280,6 +12608,7 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.objects.watchAll", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12314,9 +12643,11 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, 
error) }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.objects.watchAll", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12371,8 +12702,7 @@ func (c *OperationsAdvanceRelocateBucketCall) Header() http.Header { func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.advancerelocatebucketoperationrequest) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.advancerelocatebucketoperationrequest) if err != nil { return nil, err } @@ -12389,6 +12719,7 @@ func (c *OperationsAdvanceRelocateBucketCall) doRequest(alt string) (*http.Respo "bucket": c.bucket, "operationId": c.operationId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.advanceRelocateBucket", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12403,6 +12734,7 @@ func (c *OperationsAdvanceRelocateBucketCall) Do(opts ...googleapi.CallOption) e if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.advanceRelocateBucket", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -12453,12 +12785,11 @@ func (c *OperationsCancelCall) Header() http.Header { func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/cancel") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -12467,6 +12798,7 @@ func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "operationId": c.operationId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.cancel", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12481,6 +12813,7 @@ func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.cancel", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -12541,12 +12874,11 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -12555,6 +12887,7 @@ func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { "bucket": c.bucket, "operationId": c.operationId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12590,9 +12923,11 @@ func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunning }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12674,12 +13009,11 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -12687,6 +13021,7 @@ func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.buckets.operations.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12722,9 +13057,11 @@ func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunnin }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.buckets.operations.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12800,12 +13137,11 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("POST", urls, nil) if err != nil { return nil, err } @@ -12813,6 +13149,7 @@ func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, erro googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.create", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12847,9 +13184,11 @@ func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.create", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -12905,12 +13244,11 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "", c.header_) - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("DELETE", urls, nil) if err != nil { return nil, err } @@ -12919,6 +13257,7 @@ func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, erro "projectId": c.projectId, "accessId": c.accessId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.delete", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -12933,6 +13272,7 @@ func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { if err := googleapi.CheckResponse(res); err != nil { return gensupport.WrapError(err) } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.delete", "response", internallog.HTTPResponse(res, nil)) return nil } @@ -13000,12 +13340,11 @@ func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -13014,6 +13353,7 @@ func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) "projectId": c.projectId, "accessId": c.accessId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13049,9 +13389,11 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -13148,12 +13490,11 @@ func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -13161,6 +13502,7 @@ func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.list", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13196,9 +13538,11 @@ func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMe }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.list", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -13280,8 +13624,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := gensupport.SetHeaders(c.s.userAgent(), "application/json", c.header_) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.hmackeymetadata) + body, err := googleapi.WithoutDataWrapper.JSONBuffer(c.hmackeymetadata) if err != nil { return nil, err } @@ -13298,6 +13641,7 @@ func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, erro "projectId": c.projectId, "accessId": c.accessId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.update", "request", internallog.HTTPRequest(req, body.Bytes())) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13333,9 +13677,11 @@ func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyM }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + 
c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.hmacKeys.update", "response", internallog.HTTPResponse(res, b)) return ret, nil } @@ -13401,12 +13747,11 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("GET", urls, nil) if err != nil { return nil, err } @@ -13414,6 +13759,7 @@ func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, e googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) + c.s.logger.DebugContext(c.ctx_, "api request", "serviceName", apiName, "rpcName", "storage.projects.serviceAccount.get", "request", internallog.HTTPRequest(req, nil)) return gensupport.SendRequest(c.ctx_, c.s.client, req) } @@ -13448,8 +13794,10 @@ func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*Servi }, } target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { + b, err := gensupport.DecodeResponseBytes(target, res) + if err != nil { return nil, err } + c.s.logger.DebugContext(c.ctx_, "api response", "serviceName", apiName, "rpcName", "storage.projects.serviceAccount.get", "response", internallog.HTTPResponse(res, b)) return ret, nil } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 22940678d26..a354d223d31 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -22,7 +22,6 @@ import ( "cloud.google.com/go/auth/grpctransport" "cloud.google.com/go/auth/oauth2adapt" "cloud.google.com/go/compute/metadata" - "go.opencensus.io/plugin/ocgrpc" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "golang.org/x/oauth2" "golang.org/x/time/rate" @@ -236,6 +235,7 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, + Logger: ds.Logger, }, InternalOptions: &grpctransport.InternalOptions{ EnableNonDefaultSAForDirectPath: ds.AllowNonDefaultServiceAccount, @@ -249,6 +249,7 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna SkipValidation: skipValidation, }, UniverseDomain: ds.UniverseDomain, + Logger: ds.Logger, }) return pool, err } @@ -262,6 +263,40 @@ func prepareDialOptsNewAuth(ds *internal.DialSettings) []grpc.DialOption { return append(opts, ds.GRPCDialOpts...) } +// dryRunAsync is a wrapper for oauth2.TokenSource that performs a sync refresh +// after an async refresh. Token generated by async refresh is not used. +// +// This is an EXPERIMENTAL feature and may be removed or changed in the future. +// It is a temporary struct to determine if the async refresh +// is working properly. 
+// TODO(b/372244283): Remove after b/358175516 has been fixed +type dryRunAsync struct { + asyncTokenSource oauth2.TokenSource + syncTokenSource oauth2.TokenSource + errHandler func() +} + +// TODO(b/372244283): Remove after b/358175516 has been fixed +func newDryRunAsync(ts oauth2.TokenSource, errHandler func()) dryRunAsync { + tp := auth.NewCachedTokenProvider(oauth2adapt.TokenProviderFromTokenSource(ts), nil) + asyncTs := oauth2adapt.TokenSourceFromTokenProvider(tp) + return dryRunAsync{ + syncTokenSource: ts, + asyncTokenSource: asyncTs, + errHandler: errHandler, + } +} + +// Token returns a token or an error. +// TODO(b/372244283): Remove after b/358175516 has been fixed +func (async dryRunAsync) Token() (*oauth2.Token, error) { + _, err := async.asyncTokenSource.Token() + if err != nil { + async.errHandler() + } + return async.syncTokenSource.Token() +} + func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.ClientConn, error) { if o.HTTPClient != nil { return nil, errors.New("unsupported HTTP client specified") @@ -298,8 +333,14 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } + + ts := creds.TokenSource + // TODO(b/372244283): Remove after b/358175516 has been fixed + if o.EnableAsyncRefreshDryRun != nil { + ts = newDryRunAsync(ts, o.EnableAsyncRefreshDryRun) + } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{TokenSource: creds.TokenSource}, + TokenSource: oauth.TokenSource{TokenSource: ts}, quotaProject: internal.GetQuotaProject(creds, o.QuotaProject), requestReason: o.RequestReason, })) @@ -345,7 +386,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. - grpcOpts = addOCStatsHandler(grpcOpts, o) grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o) grpcOpts = append(grpcOpts, o.GRPCDialOpts...) if o.UserAgent != "" { @@ -355,13 +395,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C return dialContext(ctx, endpoint, grpcOpts...) 
} -func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { - if settings.TelemetryDisabled { - return opts - } - return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) -} - func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { if settings.TelemetryDisabled { return opts diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index d5b213e0f08..6b7ea74ba41 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -19,7 +19,6 @@ import ( "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/httptransport" "cloud.google.com/go/auth/oauth2adapt" - "go.opencensus.io/plugin/ochttp" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" "golang.org/x/oauth2" @@ -27,7 +26,6 @@ import ( "google.golang.org/api/internal" "google.golang.org/api/internal/cert" "google.golang.org/api/option" - "google.golang.org/api/transport/http/internal/propagation" ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -121,6 +119,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. Audience: aud, CredentialsFile: ds.CredentialsFile, CredentialsJSON: ds.CredentialsJSON, + Logger: ds.Logger, }, InternalOptions: &httptransport.InternalOptions{ EnableJWTWithScope: ds.EnableJwtWithScope, @@ -131,6 +130,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. SkipValidation: skipValidation, }, UniverseDomain: ds.UniverseDomain, + Logger: ds.Logger, }) if err != nil { return nil, err @@ -165,10 +165,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport - // Give OpenTelemetry precedence over OpenCensus in case user configuration - // causes both to write the same header (`X-Cloud-Trace-Context`). trans = addOpenTelemetryTransport(trans, settings) - trans = addOCTransport(trans, settings) switch { case settings.NoAuth: // Do nothing. @@ -309,16 +306,6 @@ func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialS return otelhttp.NewTransport(trans) } -func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { - if settings.TelemetryDisabled { - return trans - } - return &ochttp.Transport{ - Base: trans, - Propagation: &propagation.HTTPFormat{}, - } -} - // clonedTransport returns the given RoundTripper as a cloned *http.Transport. // It returns nil if the RoundTripper can't be cloned or coerced to // *http.Transport. diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go deleted file mode 100644 index ba7512aa26d..00000000000 --- a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.8 -// +build go1.8 - -// Package propagation implements X-Cloud-Trace-Context header propagation used -// by Google Cloud products. 
-package propagation - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "net/http" - "strconv" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const ( - httpHeaderMaxSize = 200 - httpHeader = `X-Cloud-Trace-Context` -) - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// HTTPFormat implements propagation.HTTPFormat to propagate -// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. -type HTTPFormat struct{} - -// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(httpHeader) - // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. - // Return if the header is empty or missing, or if the header is unreasonably - // large, to avoid making unnecessary copies of a large string. - if h == "" || len(h) > httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. - slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. - if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(httpHeader, header) -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index aa69fb4d509..4a9fce53c44 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -180,6 +180,8 @@ type CommonLanguageSettings struct { ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` // The destination where API teams want this client library to be published. Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` + // Configuration for which RPCs should be generated in the GAPIC client. 
+ SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"` } func (x *CommonLanguageSettings) Reset() { @@ -229,6 +231,13 @@ func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { return nil } +func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration { + if x != nil { + return x.SelectiveGapicGeneration + } + return nil +} + // Details about how and where to publish client libraries. type ClientLibrarySettings struct { state protoimpl.MessageState @@ -984,6 +993,16 @@ type GoSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map of service names to renamed services. Keys are the package relative + // service names and values are the name to be used for the service client + // and call options. + // + // publishing: + // + // go_settings: + // renamed_services: + // Publisher: TopicAdmin + RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *GoSettings) Reset() { @@ -1025,6 +1044,13 @@ func (x *GoSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *GoSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + // Describes the generator configuration for a method. type MethodSettings struct { state protoimpl.MessageState @@ -1123,6 +1149,57 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// This message is used to configure the generation of a subset of the RPCs in +// a service for client libraries. +type SelectiveGapicGeneration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An allowlist of the fully qualified names of RPCs that should be included + // on public client surfaces. + Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"` +} + +func (x *SelectiveGapicGeneration) Reset() { + *x = SelectiveGapicGeneration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelectiveGapicGeneration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectiveGapicGeneration) ProtoMessage() {} + +func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead. +func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{12} +} + +func (x *SelectiveGapicGeneration) GetMethods() []string { + if x != nil { + return x.Methods + } + return nil +} + // Experimental features to be included during client library generation. // These fields will be deprecated once the feature graduates and is enabled // by default. 
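
The additions in this hunk introduce `SelectiveGapicGeneration`, an allowlist of fully qualified RPC names to expose on public client surfaces, and `GoSettings.RenamedServices`, a map that renames generated Go service clients. A minimal sketch of populating the new messages, assuming the updated `google.golang.org/genproto/googleapis/api/annotations` package pulled in by this bump; the service and RPC names below are hypothetical:

    package main

    import (
        "fmt"

        "google.golang.org/genproto/googleapis/api/annotations"
    )

    func main() {
        // Allowlist the RPCs that should appear on public client surfaces
        // (fully qualified names, per the SelectiveGapicGeneration field docs).
        common := &annotations.CommonLanguageSettings{
            SelectiveGapicGeneration: &annotations.SelectiveGapicGeneration{
                Methods: []string{"google.example.v1.Publisher.CreateTopic"}, // hypothetical RPC
            },
        }
        // Rename the generated Go service client, mirroring the
        // `renamed_services: Publisher: TopicAdmin` publishing example
        // from the RenamedServices field comment.
        goSettings := &annotations.GoSettings{
            Common:          common,
            RenamedServices: map[string]string{"Publisher": "TopicAdmin"},
        }
        fmt.Println(goSettings.GetRenamedServices()["Publisher"]) // prints "TopicAdmin"
    }

In practice these settings are consumed by the GAPIC generator from a service config rather than constructed by hand; the sketch only shows the shape of the new fields.
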
@@ -1136,12 +1213,17 @@ type PythonSettings_ExperimentalFeatures struct { // This feature will be enabled by default 1 month after launching the // feature in preview packages. RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` + // Enables generation of protobuf code using new types that are more + // Pythonic which are included in `protobuf>=5.29.x`. This feature will be + // enabled by default 1 month after launching the feature in preview + // packages. + ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"` } func (x *PythonSettings_ExperimentalFeatures) Reset() { *x = PythonSettings_ExperimentalFeatures{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[13] + mi := &file_google_api_client_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1154,7 +1236,7 @@ func (x *PythonSettings_ExperimentalFeatures) String() string { func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[13] + mi := &file_google_api_client_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1177,6 +1259,13 @@ func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { return false } +func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool { + if x != nil { + return x.ProtobufPythonicTypesEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. 
// All default values below are from those used in the client library @@ -1205,7 +1294,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[16] + mi := &file_google_api_client_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1218,7 +1307,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[16] + mi := &file_google_api_client_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1406,7 +1495,7 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, @@ -1415,251 +1504,275 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, - 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, - 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, - 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a, + 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 
0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, + 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, + 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, - 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, - 0x70, 0x53, 0x65, 0x74, 0x74, 
0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, - 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, - 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, - 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, - 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, - 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, - 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, - 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, - 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, - 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, - 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73, - 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, - 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 
0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, + 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, + 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, + 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, + 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, + 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, + 0x01, 
0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, + 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, + 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, + 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, + 0x6e, 0x22, 0xc5, 0x02, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x90, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, + 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, + 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 
0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, + 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, + 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, + 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, + 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, + 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 
0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, - 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, - 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, - 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, - 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, - 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, - 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, - 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, - 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, - 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, - 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, - 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, - 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, - 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, - 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, - 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, - 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, - 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, - 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x56, 0x0a, + 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 
0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, + 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, + 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, + 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, - 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, - 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, - 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, - 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, - 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, - 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, - 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, - 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, - 
0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, - 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, - 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, - 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, - 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, - 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, - 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, + 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, + 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, + 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x34, + 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x73, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, + 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, + 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, + 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, + 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, + 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, + 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, + 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, + 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 
0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1675,7 +1788,7 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) var file_google_api_client_proto_goTypes = []interface{}{ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination @@ -1691,55 +1804,59 @@ var file_google_api_client_proto_goTypes = []interface{}{ (*RubySettings)(nil), // 11: google.api.RubySettings (*GoSettings)(nil), // 12: google.api.GoSettings (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures - nil, // 16: google.api.DotnetSettings.RenamedServicesEntry - nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 19: google.api.LaunchStage - (*durationpb.Duration)(nil), // 20: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions + (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration + nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures + nil, // 17: google.api.DotnetSettings.RenamedServicesEntry + nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry + nil, // 19: google.api.GoSettings.RenamedServicesEntry + (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 21: google.api.LaunchStage + (*durationpb.Duration)(nil), // 22: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions 
+ (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage - 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings - 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings - 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings - 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings - 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings - 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings - 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings - 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings - 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings - 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization - 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings - 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry - 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures - 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 33, // [33:33] is the sub-list for method output_type - 33, // [33:33] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 29, // [29:33] is the sub-list for extension extendee - 0, // [0:29] 
is the sub-list for field type_name + 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration + 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry + 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 35, // [35:35] is the sub-list for method output_type + 35, // [35:35] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 31, // [31:35] is the sub-list for 
extension extendee + 0, // [0:31] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1892,7 +2009,19 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelectiveGapicGeneration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PythonSettings_ExperimentalFeatures); i { case 0: return &v.state @@ -1904,7 +2033,7 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1923,7 +2052,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 17, + NumMessages: 19, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/modules.txt b/vendor/modules.txt index 1580492beeb..0f989a32b49 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,7 +4,7 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.10.2 +# cloud.google.com/go/auth v0.13.0 ## explicit; go 1.21 cloud.google.com/go/auth cloud.google.com/go/auth/credentials @@ -21,10 +21,10 @@ cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport cloud.google.com/go/auth/internal/transport/cert -# cloud.google.com/go/auth/oauth2adapt v0.2.5 +# cloud.google.com/go/auth/oauth2adapt v0.2.6 ## explicit; go 1.21 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute/metadata v0.5.2 +# cloud.google.com/go/compute/metadata v0.6.0 ## explicit; go 1.21 cloud.google.com/go/compute/metadata # cloud.google.com/go/iam v1.2.2 @@ -388,9 +388,10 @@ github.com/felixge/fgprof # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal # github.com/go-errors/errors v1.4.2 ## explicit; go 1.14 github.com/go-errors/errors @@ -433,8 +434,8 @@ github.com/go-openapi/analysis/internal/flatten/sortref # github.com/go-openapi/errors v0.22.0 ## explicit; go 1.20 github.com/go-openapi/errors -# github.com/go-openapi/jsonpointer v0.20.2 -## explicit; go 1.19 +# github.com/go-openapi/jsonpointer v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonpointer # github.com/go-openapi/jsonreference v0.20.4 ## explicit; go 1.19 @@ -557,8 +558,8 @@ github.com/google/gopacket/layers github.com/google/gopacket/pcap github.com/google/gopacket/tcpassembly github.com/google/gopacket/tcpassembly/tcpreader -# github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da -## explicit; go 1.19 +# github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad +## explicit; go 1.22 github.com/google/pprof/profile # github.com/google/s2a-go v0.1.8 ## explicit; go 1.20 @@ 
-1031,7 +1032,7 @@ github.com/prometheus/common/version # github.com/prometheus/common/sigv4 v0.1.0 ## explicit; go 1.15 github.com/prometheus/common/sigv4 -# github.com/prometheus/exporter-toolkit v0.13.1 +# github.com/prometheus/exporter-toolkit v0.13.2 ## explicit; go 1.22 github.com/prometheus/exporter-toolkit/web # github.com/prometheus/procfs v0.15.1 @@ -1039,8 +1040,8 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20250102152619-93fa7617c041 -## explicit; go 1.22.0 +# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203 +## explicit; go 1.22.7 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery github.com/prometheus/prometheus/discovery/refresh @@ -1283,21 +1284,13 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore ## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal -go.opencensus.io/internal/tagencoding -go.opencensus.io/metric/metricdata -go.opencensus.io/metric/metricproducer -go.opencensus.io/plugin/ocgrpc -go.opencensus.io/plugin/ochttp -go.opencensus.io/plugin/ochttp/propagation/b3 -go.opencensus.io/resource -go.opencensus.io/stats -go.opencensus.io/stats/internal -go.opencensus.io/stats/view -go.opencensus.io/tag go.opencensus.io/trace go.opencensus.io/trace/internal -go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.opentelemetry.io/auto/sdk v1.1.0 +## explicit; go 1.22.0 +go.opentelemetry.io/auto/sdk +go.opentelemetry.io/auto/sdk/internal/telemetry # go.opentelemetry.io/collector/pdata v1.22.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/pdata/internal @@ -1324,18 +1317,18 @@ go.opentelemetry.io/collector/semconv/v1.6.1 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 -## explicit; go 1.22 +# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 -## explicit; go 1.22 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 +## explicit; go 1.22.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel v1.33.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -1350,17 +1343,18 @@ go.opentelemetry.io/otel/semconv/v1.18.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/metric v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/metric v1.33.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/metric 
go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop # go.opentelemetry.io/otel/sdk/metric v1.30.0 ## explicit; go 1.22 -# go.opentelemetry.io/otel/trace v1.32.0 -## explicit; go 1.22 +# go.opentelemetry.io/otel/trace v1.33.0 +## explicit; go 1.22.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded +go.opentelemetry.io/otel/trace/noop # go.uber.org/atomic v1.11.0 ## explicit; go 1.18 go.uber.org/atomic @@ -1485,7 +1479,7 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/api v0.209.0 +# google.golang.org/api v0.213.0 ## explicit; go 1.21 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1502,12 +1496,11 @@ google.golang.org/api/storage/v1 google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http -google.golang.org/api/transport/http/internal/propagation # google.golang.org/genproto v0.0.0-20241113202542-65e8d215514f ## explicit; go 1.21 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 +# google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations @@ -1516,7 +1509,7 @@ google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.68.1 => google.golang.org/grpc v1.65.0 +# google.golang.org/grpc v1.69.0 => google.golang.org/grpc v1.65.0 ## explicit; go 1.21 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1711,7 +1704,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20250102152619-93fa7617c041 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b From 729661fe101967aa4124dde8acbb2bd566d5d873 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Thu, 9 Jan 2025 12:32:39 -0500 Subject: [PATCH 02/18] Enforce persistentVolumeClaimRetentionPolicy Retain policy on partition ingesters during migration to ingest storage (#10395) * Enforce persistentVolumeClaimRetentionPolicy Retain policy on partition ingesters during migration to ingest storage Signed-off-by: Marco Pracucci * Add assertions Signed-off-by: Marco Pracucci * Updated CHANGELOG Signed-off-by: Marco Pracucci * Update CHANGELOG Signed-off-by: Marco Pracucci --------- Signed-off-by: Marco Pracucci --- CHANGELOG.md | 1 + ...st-storage-migration-step-1-generated.yaml | 9 ++++++++ ...st-storage-migration-step-2-generated.yaml | 9 ++++++++ ...st-storage-migration-step-3-generated.yaml | 9 ++++++++ ...st-storage-migration-step-4-generated.yaml | 9 ++++++++ ...t-storage-migration-step-5a-generated.yaml | 9 ++++++++ ...t-storage-migration-step-5b-generated.yaml | 9 ++++++++ 
...st-storage-migration-step-6-generated.yaml | 9 ++++++++ ...st-storage-migration-step-7-generated.yaml | 9 ++++++++ ...st-storage-migration-step-8-generated.yaml | 9 ++++++++ .../mimir/ingest-storage-migration.libsonnet | 23 +++++++++++++++++++ operations/mimir/mimir.libsonnet | 2 +- 12 files changed, 106 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cb9ed7c2e96..cfd832a09c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ * [CHANGE] Update rollout-operator version to 0.22.0. #10229 * [CHANGE] Memcached: Update to Memcached 1.6.34. #10318 +* [ENHANCEMENT] Enforce `persistentVolumeClaimRetentionPolicy` `Retain` policy on partition ingesters during migration to experimental ingest storage. #10395 * [BUGFIX] Ports in container rollout-operator. #10273 ### Mimirtool diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-1-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-1-generated.yaml index 83e3b7ed618..5e19e88afbe 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-1-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-1-generated.yaml @@ -1937,6 +1937,9 @@ metadata: name: ingester-zone-a-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2215,6 +2218,9 @@ metadata: name: ingester-zone-b-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2487,6 +2493,9 @@ metadata: name: ingester-zone-c-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-2-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-2-generated.yaml index c39a3e335fb..a9fafd4b42f 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-2-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-2-generated.yaml @@ -1950,6 +1950,9 @@ metadata: name: ingester-zone-a-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2228,6 +2231,9 @@ metadata: name: ingester-zone-b-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2500,6 +2506,9 @@ metadata: name: ingester-zone-c-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-3-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-3-generated.yaml index cd5e83a98b5..b68e1ef1775 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-3-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-3-generated.yaml @@ -1972,6 +1972,9 @@ metadata: name: ingester-zone-a-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2250,6 +2253,9 @@ metadata: name: ingester-zone-b-partition 
namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2522,6 +2528,9 @@ metadata: name: ingester-zone-c-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-4-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-4-generated.yaml index 3492fd5e57b..182a6fc867c 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-4-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-4-generated.yaml @@ -1970,6 +1970,9 @@ metadata: name: ingester-zone-a-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2248,6 +2251,9 @@ metadata: name: ingester-zone-b-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2520,6 +2526,9 @@ metadata: name: ingester-zone-c-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-5a-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-5a-generated.yaml index c3cb027ed28..767dd54a535 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-5a-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-5a-generated.yaml @@ -1970,6 +1970,9 @@ metadata: name: ingester-zone-a-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2248,6 +2251,9 @@ metadata: name: ingester-zone-b-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2520,6 +2526,9 @@ metadata: name: ingester-zone-c-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-5b-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-5b-generated.yaml index 77c9245b621..0810373ecb9 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-5b-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-5b-generated.yaml @@ -1970,6 +1970,9 @@ metadata: name: ingester-zone-a-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2248,6 +2251,9 @@ metadata: name: ingester-zone-b-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2520,6 +2526,9 @@ metadata: name: ingester-zone-c-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: diff --git 
a/operations/mimir-tests/test-ingest-storage-migration-step-6-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-6-generated.yaml index 84aeb85e119..1a0656249ce 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-6-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-6-generated.yaml @@ -1763,6 +1763,9 @@ metadata: name: ingester-zone-a-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -1909,6 +1912,9 @@ metadata: name: ingester-zone-b-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2049,6 +2055,9 @@ metadata: name: ingester-zone-c-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-7-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-7-generated.yaml index b7cd7b19684..2129ad02c55 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-7-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-7-generated.yaml @@ -1779,6 +1779,9 @@ metadata: name: ingester-zone-a-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -1925,6 +1928,9 @@ metadata: name: ingester-zone-b-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: @@ -2065,6 +2071,9 @@ metadata: name: ingester-zone-c-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 1 selector: diff --git a/operations/mimir-tests/test-ingest-storage-migration-step-8-generated.yaml b/operations/mimir-tests/test-ingest-storage-migration-step-8-generated.yaml index b325bf5d6d0..a7f9b2d15e0 100644 --- a/operations/mimir-tests/test-ingest-storage-migration-step-8-generated.yaml +++ b/operations/mimir-tests/test-ingest-storage-migration-step-8-generated.yaml @@ -1779,6 +1779,9 @@ metadata: name: ingester-zone-a-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 0 selector: @@ -1925,6 +1928,9 @@ metadata: name: ingester-zone-b-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 0 selector: @@ -2065,6 +2071,9 @@ metadata: name: ingester-zone-c-partition namespace: default spec: + persistentVolumeClaimRetentionPolicy: + whenDeleted: Retain + whenScaled: Retain podManagementPolicy: Parallel replicas: 0 selector: diff --git a/operations/mimir/ingest-storage-migration.libsonnet b/operations/mimir/ingest-storage-migration.libsonnet index 5d27d0723ec..f86c41e9b3f 100644 --- a/operations/mimir/ingest-storage-migration.libsonnet +++ b/operations/mimir/ingest-storage-migration.libsonnet @@ -70,6 +70,13 @@ 'grafana.com/prepare-downscale-http-port': '80', }), + local partitionIngesterStatefulSetPolicies = + // We must guarantee that the PVCs are retained by the 
partition ingesters because during the migration + // we have a step during which the partition ingesters StatefulSet is deleted (one zone at a time) and + // PVCs need to be renamed, but their volumes preserved. + statefulSet.spec.persistentVolumeClaimRetentionPolicy.withWhenScaled('Retain') + + statefulSet.spec.persistentVolumeClaimRetentionPolicy.withWhenDeleted('Retain'), + local gossipLabel = if !$._config.memberlist_ring_enabled then {} else $.apps.v1.statefulSet.spec.template.metadata.withLabelsMixin({ [$._config.gossip_member_label]: 'true' }), @@ -93,6 +100,7 @@ self.newIngesterZoneStatefulSet('a-partition', $.ingester_partition_zone_a_container, $.ingester_partition_zone_a_node_affinity_matchers) + statefulSet.mixin.spec.withReplicas($._config.ingest_storage_migration_partition_ingester_zone_a_replicas) + partitionIngesterStatefulSetLabelsAndAnnotations + + partitionIngesterStatefulSetPolicies + (if !$._config.ingest_storage_migration_partition_ingester_zone_a_scale_down then {} else statefulSet.mixin.spec.withReplicas(0)), ingester_partition_zone_a_service: if !$._config.ingest_storage_migration_partition_ingester_zone_a_enabled then null else @@ -106,6 +114,7 @@ self.newIngesterZoneStatefulSet('b-partition', $.ingester_partition_zone_b_container, $.ingester_partition_zone_b_node_affinity_matchers) + statefulSet.mixin.spec.withReplicas($._config.ingest_storage_migration_partition_ingester_zone_b_replicas) + partitionIngesterStatefulSetLabelsAndAnnotations + + partitionIngesterStatefulSetPolicies + (if !$._config.ingest_storage_migration_partition_ingester_zone_b_scale_down then {} else statefulSet.mixin.spec.withReplicas(0)), ingester_partition_zone_b_service: if !$._config.ingest_storage_migration_partition_ingester_zone_b_enabled then null else @@ -119,6 +128,7 @@ self.newIngesterZoneStatefulSet('c-partition', $.ingester_partition_zone_c_container, $.ingester_partition_zone_c_node_affinity_matchers) + statefulSet.mixin.spec.withReplicas($._config.ingest_storage_migration_partition_ingester_zone_c_replicas) + partitionIngesterStatefulSetLabelsAndAnnotations + + partitionIngesterStatefulSetPolicies + (if !$._config.ingest_storage_migration_partition_ingester_zone_c_scale_down then {} else statefulSet.mixin.spec.withReplicas(0)), ingester_partition_zone_c_service: if !$._config.ingest_storage_migration_partition_ingester_zone_c_enabled then null else @@ -240,3 +250,16 @@ local overrideSuperIfExists(name, override) = if !( name in super) || super[name] == null || super[name] == {} then null else (if override == null then null else super[name] + override), } + +// Assert on required specs, to make sure they don't get overridden elsewhere. These assertions are +// executed at a later stage in the jsonnet evaluation, so they can detect overrides done in files +// imported after this one too. 
+{ + assert $.ingester_partition_zone_a_statefulset == null || $.ingester_partition_zone_a_statefulset.spec.persistentVolumeClaimRetentionPolicy.whenScaled == 'Retain' : 'persistentVolumeClaimRetentionPolicy.whenScaled must be set to Retain on ingester_partition_zone_a_statefulset', + assert $.ingester_partition_zone_b_statefulset == null || $.ingester_partition_zone_b_statefulset.spec.persistentVolumeClaimRetentionPolicy.whenScaled == 'Retain' : 'persistentVolumeClaimRetentionPolicy.whenScaled must be set to Retain on ingester_partition_zone_b_statefulset', + assert $.ingester_partition_zone_c_statefulset == null || $.ingester_partition_zone_c_statefulset.spec.persistentVolumeClaimRetentionPolicy.whenScaled == 'Retain' : 'persistentVolumeClaimRetentionPolicy.whenScaled must be set to Retain on ingester_partition_zone_c_statefulset', + + assert $.ingester_partition_zone_a_statefulset == null || $.ingester_partition_zone_a_statefulset.spec.persistentVolumeClaimRetentionPolicy.whenDeleted == 'Retain' : 'persistentVolumeClaimRetentionPolicy.whenDeleted must be set to Retain on ingester_partition_zone_a_statefulset', + assert $.ingester_partition_zone_b_statefulset == null || $.ingester_partition_zone_b_statefulset.spec.persistentVolumeClaimRetentionPolicy.whenDeleted == 'Retain' : 'persistentVolumeClaimRetentionPolicy.whenDeleted must be set to Retain on ingester_partition_zone_b_statefulset', + assert $.ingester_partition_zone_c_statefulset == null || $.ingester_partition_zone_c_statefulset.spec.persistentVolumeClaimRetentionPolicy.whenDeleted == 'Retain' : 'persistentVolumeClaimRetentionPolicy.whenDeleted must be set to Retain on ingester_partition_zone_c_statefulset', +} diff --git a/operations/mimir/mimir.libsonnet b/operations/mimir/mimir.libsonnet index f559ae89e29..2efda5339e4 100644 --- a/operations/mimir/mimir.libsonnet +++ b/operations/mimir/mimir.libsonnet @@ -53,7 +53,7 @@ // Support for ReplicaTemplate objects. (import 'replica-template.libsonnet') + -// Experimental ingest storage. +// Experimental ingest storage. Keep this at the end, because we need to override components on top of other changes. 
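+// (A minimal sketch of the override semantics relied on here: jsonnet object addition
+// merges fields with the right-hand side winning, e.g. ({ replicas: 3 } + { replicas: 0 }).replicas
+// evaluates to 0, so the files imported below can redefine components declared by any
+// import above them.)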
(import 'ingest-storage.libsonnet') + (import 'ingest-storage-ingester-autoscaling.libsonnet') + (import 'ingest-storage-migration.libsonnet') + From 27f0faf41ac8a616d0ca3b3d182a9562e297745f Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Thu, 9 Jan 2025 19:41:56 +0100 Subject: [PATCH 03/18] ruler: use max_retries_rate flag (#10393) --- pkg/mimir/modules.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 5070033bafc..2761a885526 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -859,7 +859,7 @@ func (t *Mimir) initRuler() (serv services.Service, err error) { if err != nil { return nil, err } - remoteQuerier := ruler.NewRemoteQuerier(queryFrontendClient, t.Cfg.Querier.EngineConfig.Timeout, 1, t.Cfg.Ruler.QueryFrontend.QueryResultResponseFormat, t.Cfg.API.PrometheusHTTPPrefix, util_log.Logger, ruler.WithOrgIDMiddleware) + remoteQuerier := ruler.NewRemoteQuerier(queryFrontendClient, t.Cfg.Querier.EngineConfig.Timeout, t.Cfg.Ruler.QueryFrontend.MaxRetriesRate, t.Cfg.Ruler.QueryFrontend.QueryResultResponseFormat, t.Cfg.API.PrometheusHTTPPrefix, util_log.Logger, ruler.WithOrgIDMiddleware) embeddedQueryable = prom_remote.NewSampleAndChunkQueryableClient( remoteQuerier, From e7d8d36a22a0569859b55b7f021e4ebdc8cbbcd0 Mon Sep 17 00:00:00 2001 From: Vladimir Varankin Date: Thu, 9 Jan 2025 21:18:46 +0100 Subject: [PATCH 04/18] blockbuilder: fix panic when closing tsdb after failing to upload block (#10391) Signed-off-by: Vladimir Varankin --- pkg/blockbuilder/tsdb.go | 30 +++++++++++++----------------- pkg/blockbuilder/tsdb_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/pkg/blockbuilder/tsdb.go b/pkg/blockbuilder/tsdb.go index ee2d610fe78..a3fcaf0c6ff 100644 --- a/pkg/blockbuilder/tsdb.go +++ b/pkg/blockbuilder/tsdb.go @@ -316,30 +316,26 @@ type blockUploader func(_ context.Context, tenantID, dbDir string, blockIDs []st // All the DBs are closed and directories cleared irrespective of success or failure of this function. func (b *TSDBBuilder) CompactAndUpload(ctx context.Context, uploadBlocks blockUploader) (_ int, err error) { var ( - doneDBsMu sync.Mutex - doneDBs = make(map[*userTSDB]bool) + closedDBsMu sync.Mutex + closedDBs = make(map[*userTSDB]bool) ) b.tsdbsMu.Lock() defer func() { - b.tsdbsMu.Unlock() - var merr multierror.MultiError merr.Add(err) - // If some TSDB was not compacted or uploaded, it will be re-tried in the next cycle, so we remove it here. + // If some TSDB was not compacted or uploaded, it will be re-tried in the next cycle, so we always remove it here. for _, db := range b.tsdbs { - if doneDBs[db] { - continue + if !closedDBs[db] { + merr.Add(db.Close()) } - dbDir := db.Dir() - merr.Add(db.Close()) - merr.Add(os.RemoveAll(dbDir)) + merr.Add(os.RemoveAll(db.Dir())) } - - err = merr.Err() - // Clear the map so that it can be released from the memory. Not setting to nil in case we want to reuse the TSDBBuilder. clear(b.tsdbs) + b.tsdbsMu.Unlock() + + err = merr.Err() }() level.Info(b.logger).Log("msg", "compacting and uploading blocks", "num_tsdb", len(b.tsdbs)) @@ -384,14 +380,14 @@ func (b *TSDBBuilder) CompactAndUpload(ctx context.Context, uploadBlocks blockUp return err } + closedDBsMu.Lock() + closedDBs[db] = true + closedDBsMu.Unlock() + if err := uploadBlocks(ctx, tenant.tenantID, dbDir, blockIDs); err != nil { return err } - doneDBsMu.Lock() - doneDBs[db] = true - doneDBsMu.Unlock() - // Clear the DB from the disk. 
Don't need it anymore.
 	return os.RemoveAll(dbDir)
 })
diff --git a/pkg/blockbuilder/tsdb_test.go b/pkg/blockbuilder/tsdb_test.go
index 9b1ad07cfc7..ff36eb18301 100644
--- a/pkg/blockbuilder/tsdb_test.go
+++ b/pkg/blockbuilder/tsdb_test.go
@@ -281,6 +281,30 @@ func TestTSDBBuilder(t *testing.T) {
 	}
 }
 
+func TestTSDBBuilder_CompactAndUpload_fail(t *testing.T) {
+	overrides, err := validation.NewOverrides(defaultLimitsTestConfig(), nil)
+	require.NoError(t, err)
+	metrics := newTSDBBBuilderMetrics(prometheus.NewPedanticRegistry())
+	builder := NewTSDBBuilder(log.NewNopLogger(), t.TempDir(), mimir_tsdb.BlocksStorageConfig{}, overrides, metrics, 0)
+	t.Cleanup(func() {
+		require.NoError(t, builder.Close())
+	})
+
+	userID := strconv.Itoa(rand.Int())
+	tenant := tsdbTenant{
+		partitionID: 0,
+		tenantID:    userID,
+	}
+	_, err = builder.getOrCreateTSDB(tenant)
+	require.NoError(t, err)
+
+	errUploadFailed := fmt.Errorf("upload failed")
+	_, err = builder.CompactAndUpload(context.Background(), func(_ context.Context, _, _ string, _ []string) error {
+		return errUploadFailed
+	})
+	require.ErrorIs(t, err, errUploadFailed)
+}
+
 func compareQuery(t *testing.T, db *tsdb.DB, expSamples []mimirpb.Sample, expHistograms []mimirpb.Histogram, matchers ...*labels.Matcher) {
 	querier, err := db.Querier(math.MinInt64, math.MaxInt64)
 	require.NoError(t, err)

From 56c9edd3c4bd1f36ac25e78795af1282555f9d72 Mon Sep 17 00:00:00 2001
From: Charles Korn
Date: Fri, 10 Jan 2025 15:17:55 +1100
Subject: [PATCH 05/18] MQE: fix issues with vector/vector binary comparison operations (#10235)

* Add early filtering test case for group_right

* Fix issue where comparison operations without the bool modifier would return incorrect results if the left side contained series with different metric names

* Avoid expensive `labels.Labels.String()` call

* Fix issue where comparison operations between two vectors incorrectly fail with a conflict if multiple left series match the same right series and only one left point remains after applying the comparison

* Add comparison operator benchmark

* Improve performance for case where left side is smaller than right

* Extract some methods

---
 pkg/streamingpromql/benchmarks/benchmarks.go | 3 +
 .../operators/binops/binary_operation.go | 39 +++
 .../grouped_vector_vector_binary_operation.go | 43 +--
 ...e_to_one_vector_vector_binary_operation.go | 301 ++++++++++++------
 ...one_vector_vector_binary_operation_test.go | 100 +++---
 pkg/streamingpromql/query.go | 2 +-
 .../testdata/ours/binary_operators.test | 27 +-
 7 files changed, 318 insertions(+), 197 deletions(-)

diff --git a/pkg/streamingpromql/benchmarks/benchmarks.go b/pkg/streamingpromql/benchmarks/benchmarks.go
index 597c2704b37..b49133cdec8 100644
--- a/pkg/streamingpromql/benchmarks/benchmarks.go
+++ b/pkg/streamingpromql/benchmarks/benchmarks.go
@@ -158,6 +158,9 @@ func TestCases(metricSizes []int) []BenchCase {
 		{
 			Expr: "nh_X / a_X",
 		},
+		{
+			Expr: "a_X == b_X",
+		},
 		{
 			Expr: "2 * a_X",
 		},
diff --git a/pkg/streamingpromql/operators/binops/binary_operation.go b/pkg/streamingpromql/operators/binops/binary_operation.go
index b499ad4f387..3bc3a82e2f6 100644
--- a/pkg/streamingpromql/operators/binops/binary_operation.go
+++ b/pkg/streamingpromql/operators/binops/binary_operation.go
@@ -5,9 +5,11 @@ package binops
 import (
 	"fmt"
 	"slices"
+	"time"
 
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" @@ -82,6 +84,43 @@ func groupLabelsFunc(vectorMatching parser.VectorMatching, op parser.ItemType, r } } +func formatConflictError( + firstConflictingSeriesIndex int, + secondConflictingSeriesIndex int, + description string, + ts int64, + sourceSeriesMetadata []types.SeriesMetadata, + side string, + vectorMatching parser.VectorMatching, + op parser.ItemType, + returnBool bool, +) error { + firstConflictingSeriesLabels := sourceSeriesMetadata[firstConflictingSeriesIndex].Labels + groupLabels := groupLabelsFunc(vectorMatching, op, returnBool)(firstConflictingSeriesLabels) + + if secondConflictingSeriesIndex == -1 { + return fmt.Errorf( + "found %s for the match group %s on the %s side of the operation at timestamp %s", + description, + groupLabels, + side, + timestamp.Time(ts).Format(time.RFC3339Nano), + ) + } + + secondConflictingSeriesLabels := sourceSeriesMetadata[secondConflictingSeriesIndex].Labels + + return fmt.Errorf( + "found %s for the match group %s on the %s side of the operation at timestamp %s: %s and %s", + description, + groupLabels, + side, + timestamp.Time(ts).Format(time.RFC3339Nano), + firstConflictingSeriesLabels, + secondConflictingSeriesLabels, + ) +} + // filterSeries returns data filtered based on the mask provided. // // mask is expected to contain one value for each time step in the query time range. diff --git a/pkg/streamingpromql/operators/binops/grouped_vector_vector_binary_operation.go b/pkg/streamingpromql/operators/binops/grouped_vector_vector_binary_operation.go index 206044c3051..2bdda7d501a 100644 --- a/pkg/streamingpromql/operators/binops/grouped_vector_vector_binary_operation.go +++ b/pkg/streamingpromql/operators/binops/grouped_vector_vector_binary_operation.go @@ -11,10 +11,8 @@ import ( "fmt" "slices" "sort" - "time" "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/util/annotations" @@ -618,13 +616,13 @@ func (g *GroupedVectorVectorBinaryOperation) updateOneSidePresence(side *oneSide for _, p := range seriesData.Floats { if otherSeriesIdx := matchGroup.updatePresence(g.timeRange.PointIndex(p.T), seriesIdx); otherSeriesIdx != -1 { - return g.formatConflictError(otherSeriesIdx, seriesIdx, "duplicate series", p.T, g.oneSideMetadata, g.oneSideHandedness()) + return formatConflictError(otherSeriesIdx, seriesIdx, "duplicate series", p.T, g.oneSideMetadata, g.oneSideHandedness(), g.VectorMatching, g.Op, g.ReturnBool) } } for _, p := range seriesData.Histograms { if otherSeriesIdx := matchGroup.updatePresence(g.timeRange.PointIndex(p.T), seriesIdx); otherSeriesIdx != -1 { - return g.formatConflictError(otherSeriesIdx, seriesIdx, "duplicate series", p.T, g.oneSideMetadata, g.oneSideHandedness()) + return formatConflictError(otherSeriesIdx, seriesIdx, "duplicate series", p.T, g.oneSideMetadata, g.oneSideHandedness(), g.VectorMatching, g.Op, g.ReturnBool) } } } @@ -646,7 +644,8 @@ func (g *GroupedVectorVectorBinaryOperation) mergeOneSide(data []types.InstantVe } if conflict != nil { - return types.InstantVectorSeriesData{}, g.formatConflictError(conflict.FirstConflictingSeriesIndex, conflict.SecondConflictingSeriesIndex, conflict.Description, conflict.Timestamp, g.oneSideMetadata, g.oneSideHandedness()) + err := 
formatConflictError(conflict.FirstConflictingSeriesIndex, conflict.SecondConflictingSeriesIndex, conflict.Description, conflict.Timestamp, g.oneSideMetadata, g.oneSideHandedness(), g.VectorMatching, g.Op, g.ReturnBool) + return types.InstantVectorSeriesData{}, err } return merged, nil @@ -689,40 +688,6 @@ func (g *GroupedVectorVectorBinaryOperation) mergeManySide(data []types.InstantV return merged, nil } -func (g *GroupedVectorVectorBinaryOperation) formatConflictError( - firstConflictingSeriesIndex int, - secondConflictingSeriesIndex int, - description string, - ts int64, - sourceSeriesMetadata []types.SeriesMetadata, - side string, -) error { - firstConflictingSeriesLabels := sourceSeriesMetadata[firstConflictingSeriesIndex].Labels - groupLabels := groupLabelsFunc(g.VectorMatching, g.Op, g.ReturnBool)(firstConflictingSeriesLabels) - - if secondConflictingSeriesIndex == -1 { - return fmt.Errorf( - "found %s for the match group %s on the %s side of the operation at timestamp %s", - description, - groupLabels, - side, - timestamp.Time(ts).Format(time.RFC3339Nano), - ) - } - - secondConflictingSeriesLabels := sourceSeriesMetadata[secondConflictingSeriesIndex].Labels - - return fmt.Errorf( - "found %s for the match group %s on the %s side of the operation at timestamp %s: %s and %s", - description, - groupLabels, - side, - timestamp.Time(ts).Format(time.RFC3339Nano), - firstConflictingSeriesLabels, - secondConflictingSeriesLabels, - ) -} - func (g *GroupedVectorVectorBinaryOperation) oneSideHandedness() string { switch g.VectorMatching.Card { case parser.CardOneToMany: diff --git a/pkg/streamingpromql/operators/binops/one_to_one_vector_vector_binary_operation.go b/pkg/streamingpromql/operators/binops/one_to_one_vector_vector_binary_operation.go index 70f1204d7bf..588cdfbf98c 100644 --- a/pkg/streamingpromql/operators/binops/one_to_one_vector_vector_binary_operation.go +++ b/pkg/streamingpromql/operators/binops/one_to_one_vector_vector_binary_operation.go @@ -7,13 +7,11 @@ package binops import ( "context" - "fmt" "math" "sort" - "time" "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/util/annotations" @@ -48,13 +46,14 @@ type OneToOneVectorVectorBinaryOperation struct { expressionPosition posrange.PositionRange annotations *annotations.Annotations + timeRange types.QueryTimeRange } var _ types.InstantVectorOperator = &OneToOneVectorVectorBinaryOperation{} type oneToOneBinaryOperationOutputSeries struct { - leftSeriesIndices []int - rightSeriesIndices []int + leftSeriesIndices []int + rightSide *oneToOneBinaryOperationRightSide } // latestLeftSeries returns the index of the last series from the left source needed for this output series. @@ -66,9 +65,43 @@ func (s oneToOneBinaryOperationOutputSeries) latestLeftSeries() int { // latestRightSeries returns the index of the last series from the right source needed for this output series. // -// It assumes that rightSeriesIndices is sorted in ascending order. +// It assumes that rightSide.rightSeriesIndices is sorted in ascending order. 
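+//
+// (For example, assuming rightSide.rightSeriesIndices is []int{1, 4, 7}, it returns 7.)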
func (s oneToOneBinaryOperationOutputSeries) latestRightSeries() int { - return s.rightSeriesIndices[len(s.rightSeriesIndices)-1] + return s.rightSide.rightSeriesIndices[len(s.rightSide.rightSeriesIndices)-1] +} + +type oneToOneBinaryOperationRightSide struct { + // If this right side is used for multiple output series and has not been populated, rightSeriesIndices will not be nil. + // If this right side has been populated, rightSeriesIndices will be nil. + rightSeriesIndices []int + mergedData types.InstantVectorSeriesData + + // The number of output series that use the same series from the right side. + // Will only be greater than 1 for comparison binary operations without the bool modifier + // where the input series on the left side have different metric names. + outputSeriesCount int + + // Time steps at which we've seen samples for any left side that matches with this right side. + // Each value is the index of the source series of the sample, or -1 if no sample has been seen for this time step yet. + leftSidePresence []int +} + +// updatePresence records the presence of a sample from the left side series with index seriesIdx at the timestamp with index timestampIdx. +// +// If there is already a sample present from another series at the same timestamp, updatePresence returns that series' index, or +// -1 if there was no sample present at the same timestamp from another series. +func (g *oneToOneBinaryOperationRightSide) updatePresence(timestampIdx int64, seriesIdx int) int { + if existing := g.leftSidePresence[timestampIdx]; existing != -1 { + return existing + } + + g.leftSidePresence[timestampIdx] = seriesIdx + return -1 +} + +type oneToOneBinaryOperationOutputSeriesWithLabels struct { + labels labels.Labels + series *oneToOneBinaryOperationOutputSeries } func NewOneToOneVectorVectorBinaryOperation( @@ -80,6 +113,7 @@ func NewOneToOneVectorVectorBinaryOperation( memoryConsumptionTracker *limiting.MemoryConsumptionTracker, annotations *annotations.Annotations, expressionPosition posrange.PositionRange, + timeRange types.QueryTimeRange, ) (*OneToOneVectorVectorBinaryOperation, error) { e, err := newVectorVectorBinaryOperationEvaluator(op, returnBool, memoryConsumptionTracker, annotations, expressionPosition) if err != nil { @@ -97,6 +131,7 @@ func NewOneToOneVectorVectorBinaryOperation( evaluator: e, expressionPosition: expressionPosition, annotations: annotations, + timeRange: timeRange, } return b, nil @@ -182,91 +217,124 @@ func (b *OneToOneVectorVectorBinaryOperation) loadSeriesMetadata(ctx context.Con // - a list indicating which series from the left side are needed to compute the output // - a list indicating which series from the right side are needed to compute the output func (b *OneToOneVectorVectorBinaryOperation) computeOutputSeries() ([]types.SeriesMetadata, []*oneToOneBinaryOperationOutputSeries, []bool, []bool, error) { - labelsFunc := groupLabelsFunc(b.VectorMatching, b.Op, b.ReturnBool) groupKeyFunc := vectorMatchingGroupKeyFunc(b.VectorMatching) - outputSeriesMap := map[string]*oneToOneBinaryOperationOutputSeries{} - // Use the smaller side to populate the map of possible output series first. - // This should ensure we don't unnecessarily populate the output series map with series that will never match in most cases. - // (It's possible that all the series on the larger side all belong to the same group, but this is expected to be rare.) 
- smallerSide := b.leftMetadata - largerSide := b.rightMetadata - smallerSideIsLeftSide := len(b.leftMetadata) < len(b.rightMetadata) + // If the left side is smaller than the right, build a map of the possible groups from the left side + // to allow us to avoid creating unnecessary groups when iterating through the right side in computeRightSideGroups. + // This optimisation assumes that most series on either side match at most one series on the other side, + // which is generally true for one-to-one matching. + // FIXME: a possible improvement would be to only bother with this if the left side is significantly smaller + var leftSideGroupsMap map[string]struct{} - if !smallerSideIsLeftSide { - smallerSide = b.rightMetadata - largerSide = b.leftMetadata + if len(b.leftMetadata) < len(b.rightMetadata) { + leftSideGroupsMap = b.computeLeftSideGroups(groupKeyFunc) } - for idx, s := range smallerSide { - groupKey := groupKeyFunc(s.Labels) - series, exists := outputSeriesMap[string(groupKey)] // Important: don't extract the string(...) call here - passing it directly allows us to avoid allocating it. + rightSideGroupsMap := b.computeRightSideGroups(leftSideGroupsMap, groupKeyFunc) - if !exists { - series = &oneToOneBinaryOperationOutputSeries{} - outputSeriesMap[string(groupKey)] = series - } + outputSeriesMap := map[string]oneToOneBinaryOperationOutputSeriesWithLabels{} - if smallerSideIsLeftSide { - series.leftSeriesIndices = append(series.leftSeriesIndices, idx) - } else { - series.rightSeriesIndices = append(series.rightSeriesIndices, idx) - } + leftSeriesUsed, err := types.BoolSlicePool.Get(len(b.leftMetadata), b.MemoryConsumptionTracker) + if err != nil { + return nil, nil, nil, nil, err } - for idx, s := range largerSide { - groupKey := groupKeyFunc(s.Labels) + rightSeriesUsed, err := types.BoolSlicePool.Get(len(b.rightMetadata), b.MemoryConsumptionTracker) + if err != nil { + return nil, nil, nil, nil, err + } - // Important: don't extract the string(...) call below - passing it directly allows us to avoid allocating it. - if series, exists := outputSeriesMap[string(groupKey)]; exists { - if smallerSideIsLeftSide { - // Currently iterating through right side. - series.rightSeriesIndices = append(series.rightSeriesIndices, idx) - } else { - series.leftSeriesIndices = append(series.leftSeriesIndices, idx) + leftSeriesUsed = leftSeriesUsed[:len(b.leftMetadata)] + rightSeriesUsed = rightSeriesUsed[:len(b.rightMetadata)] + labelsFunc := groupLabelsFunc(b.VectorMatching, b.Op, b.ReturnBool) + outputSeriesLabelsBytes := make([]byte, 0, 1024) + + for leftSeriesIndex, s := range b.leftMetadata { + outputSeriesLabels := labelsFunc(s.Labels) + outputSeriesLabelsBytes = outputSeriesLabels.Bytes(outputSeriesLabelsBytes) // FIXME: it'd be better if we could just get the underlying byte slice without copying here + outputSeries, exists := outputSeriesMap[string(outputSeriesLabelsBytes)] + + if !exists { + groupKey := groupKeyFunc(s.Labels) + + // Important: don't extract the string(...) call below - passing it directly allows us to avoid allocating it. + rightSide, exists := rightSideGroupsMap[string(groupKey)] + + if !exists { + // No matching series on the right side. + continue + } + + if rightSide.outputSeriesCount == 0 { + // First output series the right side has matched to. + for _, rightSeriesIndex := range rightSide.rightSeriesIndices { + rightSeriesUsed[rightSeriesIndex] = true + } } - } - } - // Remove series that cannot produce samples. 
- for seriesLabels, outputSeries := range outputSeriesMap { - if len(outputSeries.leftSeriesIndices) == 0 || len(outputSeries.rightSeriesIndices) == 0 { - // No matching series on at least one side for this output series, so output series will have no samples. Remove it. - delete(outputSeriesMap, seriesLabels) + rightSide.outputSeriesCount++ + + outputSeries = oneToOneBinaryOperationOutputSeriesWithLabels{ + labels: outputSeriesLabels, + series: &oneToOneBinaryOperationOutputSeries{rightSide: rightSide}, + } + + outputSeriesMap[string(outputSeriesLabelsBytes)] = outputSeries } + + outputSeries.series.leftSeriesIndices = append(outputSeries.series.leftSeriesIndices, leftSeriesIndex) + leftSeriesUsed[leftSeriesIndex] = true } allMetadata := types.GetSeriesMetadataSlice(len(outputSeriesMap)) allSeries := make([]*oneToOneBinaryOperationOutputSeries, 0, len(outputSeriesMap)) - leftSeriesUsed, err := types.BoolSlicePool.Get(len(b.leftMetadata), b.MemoryConsumptionTracker) - if err != nil { - return nil, nil, nil, nil, err + for _, outputSeries := range outputSeriesMap { + allMetadata = append(allMetadata, types.SeriesMetadata{Labels: outputSeries.labels}) + allSeries = append(allSeries, outputSeries.series) } - rightSeriesUsed, err := types.BoolSlicePool.Get(len(b.rightMetadata), b.MemoryConsumptionTracker) - if err != nil { - return nil, nil, nil, nil, err + return allMetadata, allSeries, leftSeriesUsed, rightSeriesUsed, nil +} + +func (b *OneToOneVectorVectorBinaryOperation) computeLeftSideGroups(groupKeyFunc func(labels.Labels) []byte) map[string]struct{} { + m := map[string]struct{}{} + + for _, s := range b.leftMetadata { + groupKey := groupKeyFunc(s.Labels) + if _, exists := m[string(groupKey)]; !exists { + m[string(groupKey)] = struct{}{} + } } - leftSeriesUsed = leftSeriesUsed[:len(b.leftMetadata)] - rightSeriesUsed = rightSeriesUsed[:len(b.rightMetadata)] + return m +} - for _, outputSeries := range outputSeriesMap { - firstSeriesLabels := b.leftMetadata[outputSeries.leftSeriesIndices[0]].Labels - allMetadata = append(allMetadata, types.SeriesMetadata{Labels: labelsFunc(firstSeriesLabels)}) - allSeries = append(allSeries, outputSeries) +func (b *OneToOneVectorVectorBinaryOperation) computeRightSideGroups(leftSideGroupsMap map[string]struct{}, groupKeyFunc func(labels.Labels) []byte) map[string]*oneToOneBinaryOperationRightSide { + m := map[string]*oneToOneBinaryOperationRightSide{} + + for idx, s := range b.rightMetadata { + groupKey := groupKeyFunc(s.Labels) + + if leftSideGroupsMap != nil { + // Left side is smaller than the right, check if there's any series on the left that could match this right side series. - for _, leftSeriesIndex := range outputSeries.leftSeriesIndices { - leftSeriesUsed[leftSeriesIndex] = true + if _, exists := leftSideGroupsMap[string(groupKey)]; !exists { + continue + } } - for _, rightSeriesIndex := range outputSeries.rightSeriesIndices { - rightSeriesUsed[rightSeriesIndex] = true + group, exists := m[string(groupKey)] // Important: don't extract the string(...) call here - passing it directly allows us to avoid allocating it. + + if !exists { + group = &oneToOneBinaryOperationRightSide{} + m[string(groupKey)] = group } + + group.rightSeriesIndices = append(group.rightSeriesIndices, idx) } - return allMetadata, allSeries, leftSeriesUsed, rightSeriesUsed, nil + return m } // sortSeries sorts metadata and series in place to try to minimise the number of input series we'll need to buffer in memory. 
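The repeated map accesses keyed by string(groupKey) above are deliberate; a minimal,
self-contained sketch of the idiom (the helper name and key value are hypothetical,
for illustration only):

	// dedupeGroupKeys collects the distinct group keys from a list of serialised keys.
	func dedupeGroupKeys(keys [][]byte) map[string]struct{} {
		seen := map[string]struct{}{}
		for _, groupKey := range keys {
			// Lookup form: the Go compiler elides the []byte-to-string copy when the
			// conversion appears directly in the map index expression.
			if _, ok := seen[string(groupKey)]; ok {
				continue
			}
			// Insert form: this conversion does allocate, once per distinct key.
			seen[string(groupKey)] = struct{}{}
		}
		return seen
	}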
@@ -350,28 +418,98 @@ func (b *OneToOneVectorVectorBinaryOperation) NextSeries(ctx context.Context) (t thisSeries := b.remainingSeries[0] b.remainingSeries = b.remainingSeries[1:] + rightSide := thisSeries.rightSide + + if rightSide.rightSeriesIndices != nil { + // Right side hasn't been populated yet. + if err := b.populateRightSide(ctx, rightSide); err != nil { + return types.InstantVectorSeriesData{}, err + } + } + + // We don't need to return thisSeries.rightSide.mergedData here - computeResult will return it below if this is the last output series that references this right side. + rightSide.outputSeriesCount-- + canMutateRightSide := rightSide.outputSeriesCount == 0 allLeftSeries, err := b.leftBuffer.GetSeries(ctx, thisSeries.leftSeriesIndices) if err != nil { return types.InstantVectorSeriesData{}, err } - mergedLeftSide, err := b.mergeSingleSide(allLeftSeries, thisSeries.leftSeriesIndices, b.leftMetadata, "left") + for i, leftSeries := range allLeftSeries { + isLastLeftSeries := i == len(allLeftSeries)-1 + + allLeftSeries[i], err = b.evaluator.computeResult(leftSeries, rightSide.mergedData, true, canMutateRightSide && isLastLeftSeries) + if err != nil { + return types.InstantVectorSeriesData{}, err + } + + // If the right side matches to many output series, check for conflicts between those left side series. + if rightSide.leftSidePresence != nil { + seriesIdx := thisSeries.leftSeriesIndices[i] + + if err := b.updateLeftSidePresence(rightSide, allLeftSeries[i], seriesIdx); err != nil { + return types.InstantVectorSeriesData{}, err + } + } + } + + mergedResult, err := b.mergeSingleSide(allLeftSeries, thisSeries.leftSeriesIndices, b.leftMetadata, "left") if err != nil { return types.InstantVectorSeriesData{}, err } - allRightSeries, err := b.rightBuffer.GetSeries(ctx, thisSeries.rightSeriesIndices) + if rightSide.leftSidePresence != nil && rightSide.outputSeriesCount == 0 { + types.IntSlicePool.Put(rightSide.leftSidePresence, b.MemoryConsumptionTracker) + } + + return mergedResult, nil +} + +func (b *OneToOneVectorVectorBinaryOperation) populateRightSide(ctx context.Context, rightSide *oneToOneBinaryOperationRightSide) error { + allRightSeries, err := b.rightBuffer.GetSeries(ctx, rightSide.rightSeriesIndices) if err != nil { - return types.InstantVectorSeriesData{}, err + return err } - mergedRightSide, err := b.mergeSingleSide(allRightSeries, thisSeries.rightSeriesIndices, b.rightMetadata, "right") + rightSide.mergedData, err = b.mergeSingleSide(allRightSeries, rightSide.rightSeriesIndices, b.rightMetadata, "right") if err != nil { - return types.InstantVectorSeriesData{}, err + return err + } + + if rightSide.outputSeriesCount > 1 { + rightSide.leftSidePresence, err = types.IntSlicePool.Get(b.timeRange.StepCount, b.MemoryConsumptionTracker) + if err != nil { + return err + } + + rightSide.leftSidePresence = rightSide.leftSidePresence[:b.timeRange.StepCount] + + for i := range rightSide.leftSidePresence { + rightSide.leftSidePresence[i] = -1 + } + } + + // Signal that the right side has been populated. 
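+	// (NextSeries above treats a non-nil rightSeriesIndices as "not yet populated",
+	// so nil-ing the slice here doubles as the populated marker.)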
+ rightSide.rightSeriesIndices = nil + + return nil +} + +func (b *OneToOneVectorVectorBinaryOperation) updateLeftSidePresence(rightSide *oneToOneBinaryOperationRightSide, leftSideData types.InstantVectorSeriesData, leftSideSeriesIdx int) error { + for _, p := range leftSideData.Floats { + if otherSeriesIdx := rightSide.updatePresence(b.timeRange.PointIndex(p.T), leftSideSeriesIdx); otherSeriesIdx != -1 { + return formatConflictError(otherSeriesIdx, leftSideSeriesIdx, "duplicate series", p.T, b.leftMetadata, "left", b.VectorMatching, b.Op, b.ReturnBool) + } + } + + for _, p := range leftSideData.Histograms { + if otherSeriesIdx := rightSide.updatePresence(b.timeRange.PointIndex(p.T), leftSideSeriesIdx); otherSeriesIdx != -1 { + return formatConflictError(otherSeriesIdx, leftSideSeriesIdx, "duplicate series", p.T, b.leftMetadata, "left", b.VectorMatching, b.Op, b.ReturnBool) + } } - return b.evaluator.computeResult(mergedLeftSide, mergedRightSide, true, true) + return nil } // mergeSingleSide exists to handle the case where one side of an output series has different source series at different time steps. @@ -402,30 +540,7 @@ func (b *OneToOneVectorVectorBinaryOperation) mergeSingleSide(data []types.Insta } func (b *OneToOneVectorVectorBinaryOperation) mergeConflictToError(conflict *operators.MergeConflict, sourceSeriesMetadata []types.SeriesMetadata, side string) error { - firstConflictingSeriesLabels := sourceSeriesMetadata[conflict.FirstConflictingSeriesIndex].Labels - groupLabels := groupLabelsFunc(b.VectorMatching, b.Op, b.ReturnBool)(firstConflictingSeriesLabels) - - if conflict.SecondConflictingSeriesIndex == -1 { - return fmt.Errorf( - "found %s for the match group %s on the %s side of the operation at timestamp %s", - conflict.Description, - groupLabels, - side, - timestamp.Time(conflict.Timestamp).Format(time.RFC3339Nano), - ) - } - - secondConflictingSeriesLabels := sourceSeriesMetadata[conflict.SecondConflictingSeriesIndex].Labels - - return fmt.Errorf( - "found %s for the match group %s on the %s side of the operation at timestamp %s: %s and %s", - conflict.Description, - groupLabels, - side, - timestamp.Time(conflict.Timestamp).Format(time.RFC3339Nano), - firstConflictingSeriesLabels, - secondConflictingSeriesLabels, - ) + return formatConflictError(conflict.FirstConflictingSeriesIndex, conflict.SecondConflictingSeriesIndex, conflict.Description, conflict.Timestamp, sourceSeriesMetadata, side, b.VectorMatching, b.Op, b.ReturnBool) } func (b *OneToOneVectorVectorBinaryOperation) Close() { diff --git a/pkg/streamingpromql/operators/binops/one_to_one_vector_vector_binary_operation_test.go b/pkg/streamingpromql/operators/binops/one_to_one_vector_vector_binary_operation_test.go index bd697398b3a..0559f775f23 100644 --- a/pkg/streamingpromql/operators/binops/one_to_one_vector_vector_binary_operation_test.go +++ b/pkg/streamingpromql/operators/binops/one_to_one_vector_vector_binary_operation_test.go @@ -230,8 +230,8 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "single output series": { series: []*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{4}, - rightSeriesIndices: []int{1}, + leftSeriesIndices: []int{4}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1}}, }, }, @@ -241,12 +241,12 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "two output series, both with one input series, read from both sides in same order and already sorted correctly": { series: 
[]*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{1}, - rightSeriesIndices: []int{1}, + leftSeriesIndices: []int{1}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1}}, }, { - leftSeriesIndices: []int{2}, - rightSeriesIndices: []int{2}, + leftSeriesIndices: []int{2}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{2}}, }, }, @@ -256,12 +256,12 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "two output series, both with one input series, read from both sides in same order but sorted incorrectly": { series: []*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{2}, - rightSeriesIndices: []int{2}, + leftSeriesIndices: []int{2}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{2}}, }, { - leftSeriesIndices: []int{1}, - rightSeriesIndices: []int{1}, + leftSeriesIndices: []int{1}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1}}, }, }, @@ -271,12 +271,12 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "two output series, both with one input series, read from both sides in different order": { series: []*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{1}, - rightSeriesIndices: []int{2}, + leftSeriesIndices: []int{1}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{2}}, }, { - leftSeriesIndices: []int{2}, - rightSeriesIndices: []int{1}, + leftSeriesIndices: []int{2}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1}}, }, }, @@ -286,12 +286,12 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "two output series, both with multiple input series": { series: []*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{1, 2}, - rightSeriesIndices: []int{0, 3}, + leftSeriesIndices: []int{1, 2}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{0, 3}}, }, { - leftSeriesIndices: []int{0, 3}, - rightSeriesIndices: []int{1, 2}, + leftSeriesIndices: []int{0, 3}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1, 2}}, }, }, @@ -301,16 +301,16 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "multiple output series, both with one input series, read from both sides in same order and already sorted correctly": { series: []*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{1}, - rightSeriesIndices: []int{1}, + leftSeriesIndices: []int{1}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1}}, }, { - leftSeriesIndices: []int{2}, - rightSeriesIndices: []int{2}, + leftSeriesIndices: []int{2}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{2}}, }, { - leftSeriesIndices: []int{3}, - rightSeriesIndices: []int{3}, + leftSeriesIndices: []int{3}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{3}}, }, }, @@ -320,16 +320,16 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "multiple output series, both with one input series, read from both sides in same order but sorted incorrectly": { series: []*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{2}, - rightSeriesIndices: []int{2}, + leftSeriesIndices: []int{2}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{2}}, }, { - leftSeriesIndices: []int{3}, - rightSeriesIndices: []int{3}, + leftSeriesIndices: []int{3}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: 
[]int{3}}, }, { - leftSeriesIndices: []int{1}, - rightSeriesIndices: []int{1}, + leftSeriesIndices: []int{1}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1}}, }, }, @@ -339,16 +339,16 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "multiple output series, both with one input series, read from both sides in different order": { series: []*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{1}, - rightSeriesIndices: []int{2}, + leftSeriesIndices: []int{1}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{2}}, }, { - leftSeriesIndices: []int{3}, - rightSeriesIndices: []int{3}, + leftSeriesIndices: []int{3}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{3}}, }, { - leftSeriesIndices: []int{2}, - rightSeriesIndices: []int{1}, + leftSeriesIndices: []int{2}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1}}, }, }, @@ -358,16 +358,16 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "multiple output series, with multiple input series each": { series: []*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{4, 5, 10}, - rightSeriesIndices: []int{2, 20}, + leftSeriesIndices: []int{4, 5, 10}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{2, 20}}, }, { - leftSeriesIndices: []int{2, 4, 15}, - rightSeriesIndices: []int{3, 5, 50}, + leftSeriesIndices: []int{2, 4, 15}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{3, 5, 50}}, }, { - leftSeriesIndices: []int{3, 1}, - rightSeriesIndices: []int{1, 40}, + leftSeriesIndices: []int{3, 1}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1, 40}}, }, }, @@ -377,20 +377,20 @@ func TestOneToOneVectorVectorBinaryOperation_Sorting(t *testing.T) { "multiple output series which depend on the same input series": { series: []*oneToOneBinaryOperationOutputSeries{ { - leftSeriesIndices: []int{1}, - rightSeriesIndices: []int{2}, + leftSeriesIndices: []int{1}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{2}}, }, { - leftSeriesIndices: []int{1}, - rightSeriesIndices: []int{1}, + leftSeriesIndices: []int{1}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1}}, }, { - leftSeriesIndices: []int{2}, - rightSeriesIndices: []int{2}, + leftSeriesIndices: []int{2}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{2}}, }, { - leftSeriesIndices: []int{2}, - rightSeriesIndices: []int{1}, + leftSeriesIndices: []int{2}, + rightSide: &oneToOneBinaryOperationRightSide{rightSeriesIndices: []int{1}}, }, }, diff --git a/pkg/streamingpromql/query.go b/pkg/streamingpromql/query.go index 6f44018866c..3c76763db20 100644 --- a/pkg/streamingpromql/query.go +++ b/pkg/streamingpromql/query.go @@ -276,7 +276,7 @@ func (q *Query) convertToInstantVectorOperator(expr parser.Expr, timeRange types case parser.CardOneToMany, parser.CardManyToOne: return binops.NewGroupedVectorVectorBinaryOperation(lhs, rhs, *e.VectorMatching, e.Op, e.ReturnBool, q.memoryConsumptionTracker, q.annotations, e.PositionRange(), timeRange) case parser.CardOneToOne: - return binops.NewOneToOneVectorVectorBinaryOperation(lhs, rhs, *e.VectorMatching, e.Op, e.ReturnBool, q.memoryConsumptionTracker, q.annotations, e.PositionRange()) + return binops.NewOneToOneVectorVectorBinaryOperation(lhs, rhs, *e.VectorMatching, e.Op, e.ReturnBool, q.memoryConsumptionTracker, q.annotations, e.PositionRange(), timeRange) 
default: return nil, compat.NewNotSupportedError(fmt.Sprintf("binary expression with %v matching for '%v'", e.VectorMatching.Card, e.Op)) } diff --git a/pkg/streamingpromql/testdata/ours/binary_operators.test b/pkg/streamingpromql/testdata/ours/binary_operators.test index cb33558432e..f5725eb0399 100644 --- a/pkg/streamingpromql/testdata/ours/binary_operators.test +++ b/pkg/streamingpromql/testdata/ours/binary_operators.test @@ -1397,10 +1397,9 @@ load 6m left_side_b{env="test", pod="a"} 5 6 7 8 right_side{env="test", pod="a"} 2 2 7 7 -# FIXME: MQE currently does not correctly handle this case because it performs filtering after merging input series, whereas we should do it in the other order. -#eval range from 0 to 18m step 6m {__name__=~"left_side.*"} == ignoring(env) right_side -# left_side_a{pod="a"} _ 2 _ _ -# left_side_b{pod="a"} _ _ 7 _ +eval range from 0 to 18m step 6m {__name__=~"left_side.*"} == ignoring(env) right_side + left_side_a{pod="a"} _ 2 _ _ + left_side_b{pod="a"} _ _ 7 _ eval_fail range from 0 to 18m step 6m {__name__=~"left_side.*"} == bool ignoring(env) right_side expected_fail_regexp (multiple matches for labels: many-to-one matching must be explicit|found duplicate series for the match group .* on the left side of the operation) @@ -1416,9 +1415,8 @@ eval_fail range from 0 to 18m step 6m right_side == bool ignoring(env) {__name__ # left_side_b{pod="a"} _ _ 7 _ # but instead both engines drop the metric names in the output. # This is accepted behaviour: https://github.com/prometheus/prometheus/issues/5326 -# FIXME: MQE currently does not correctly handle this case because it performs filtering after merging input series, whereas we should do it in the other order. -#eval range from 0 to 18m step 6m {__name__=~"left_side.*"} == on(pod) right_side -# {pod="a"} _ 2 7 _ +eval range from 0 to 18m step 6m {__name__=~"left_side.*"} == on(pod) right_side + {pod="a"} _ 2 7 _ eval_fail range from 0 to 18m step 6m {__name__=~"left_side.*"} == bool on(pod) right_side expected_fail_regexp (multiple matches for labels: many-to-one matching must be explicit|found duplicate series for the match group .* on the left side of the operation) @@ -1458,9 +1456,11 @@ load 6m left{pod="b"} 5 6 7 8 right 2 2 7 7 -# FIXME: MQE currently does not correctly handle this case because it performs filtering after merging input series, whereas we should do it in the other order. -# eval range from 0 to 18m step 6m left == ignoring(pod) right -# left _ 2 7 _ +eval range from 0 to 18m step 6m left == ignoring(pod) right + left _ 2 7 _ + +eval_fail range from 0 to 18m step 6m left == ignoring(pod) group_right right + expected_fail_regexp found duplicate series for the match group .* on the left (hand-)?side of the operation clear @@ -1470,10 +1470,9 @@ load 6m left_side_b{env="test", pod="a"} _ _ 7 8 right_side{env="test", pod="a"} 2 2 7 7 -# FIXME: MQE currently does not correctly handle this case. 
-#eval range from 0 to 18m step 6m {__name__=~"left_side.*"} == ignoring(env) right_side -# left_side_a{pod="a"} _ 2 _ _ -# left_side_b{pod="a"} _ _ 7 _ +eval range from 0 to 18m step 6m {__name__=~"left_side.*"} == ignoring(env) right_side + left_side_a{pod="a"} _ 2 _ _ + left_side_b{pod="a"} _ _ 7 _ eval range from 0 to 18m step 6m {__name__=~"left_side.*"} == bool ignoring(env) right_side {pod="a"} 0 1 1 0 From 6511332d925932eedcff490d7a4f1f9b21e520a2 Mon Sep 17 00:00:00 2001 From: Jon Kartago Lamida Date: Fri, 10 Jan 2025 14:00:38 +0800 Subject: [PATCH 06/18] MQE: Add support for delta function (#9795) * First step delta function Signed-off-by: Jon Kartago Lamida * Enable delta functions.test Signed-off-by: Jon Kartago Lamida * Add delta to engine_test.go Signed-off-by: Jon Kartago Lamida * Clear up the comment Signed-off-by: Jon Kartago Lamida * Flip floatRate isCounter check condition Signed-off-by: Jon Kartago Lamida * Fix delta and implement delta annotation Signed-off-by: Jon Kartago Lamida * Clear up comment for isCounter and isRate parameter Signed-off-by: Jon Kartago Lamida * Remove unneeded comment Signed-off-by: Jon Kartago Lamida * Fix delta should not consider reset Signed-off-by: Jon Kartago Lamida * Add more tests Signed-off-by: Jon Kartago Lamida * Extract delta from rate function (#10353) * Extract delta from rate function Signed-off-by: Jon Kartago Lamida * Update pkg/streamingpromql/operators/functions/rate_increase.go Co-authored-by: Charles Korn * Update pkg/streamingpromql/operators/functions/rate_increase.go Co-authored-by: Charles Korn * Update pkg/streamingpromql/operators/functions/rate_increase.go Co-authored-by: Charles Korn * Update pkg/streamingpromql/operators/functions/rate_increase.go Co-authored-by: Charles Korn * Tidy up after applying PR suggestion Signed-off-by: Jon Kartago Lamida * Remove unnecessary head subslice Signed-off-by: Jon Kartago Lamida * Remove wrong placed annotation Signed-off-by: Jon Kartago Lamida * Remove extra copySchema Signed-off-by: Jon Kartago Lamida * Simplify native histogram sub schema Signed-off-by: Jon Kartago Lamida * Remove TODO Signed-off-by: Jon Kartago Lamida * The lastPoint should be last index of the head Signed-off-by: Jon Kartago Lamida * Add delta counterResetHint test Signed-off-by: Jon Kartago Lamida --------- Signed-off-by: Jon Kartago Lamida Co-authored-by: Charles Korn * Add various our tests to delta Signed-off-by: Jon Kartago Lamida * Add reset_hint annotation test Signed-off-by: Jon Kartago Lamida * Revert comment change Signed-off-by: Jon Kartago Lamida * Revert whitespace removal Signed-off-by: Jon Kartago Lamida * Add mix reset hint sample delta annotation Signed-off-by: Jon Kartago Lamida * Remove too much whitespace Signed-off-by: Jon Kartago Lamida * Add more delta tests Signed-off-by: Jon Kartago Lamida * Remove unnecessary sprintf Signed-off-by: Jon Kartago Lamida * Update pkg/streamingpromql/engine_test.go Co-authored-by: Charles Korn * Update pkg/streamingpromql/engine_test.go Co-authored-by: Charles Korn * Update pkg/streamingpromql/engine_test.go Co-authored-by: Charles Korn * Update pkg/streamingpromql/engine_test.go Co-authored-by: Charles Korn * Update pkg/streamingpromql/engine_test.go Co-authored-by: Charles Korn * Simplify inline map Signed-off-by: Jon Kartago Lamida * Really enable delta upstream tests Signed-off-by: Jon Kartago Lamida --------- Signed-off-by: Jon Kartago Lamida Co-authored-by: Charles Korn --- pkg/streamingpromql/engine_test.go | 54 +++++++++++- 
pkg/streamingpromql/functions.go | 1 + .../operators/functions/rate_increase.go | 83 ++++++++++++++++++- .../testdata/ours/functions.test | 50 +++++++++++ .../testdata/upstream/functions.test | 26 +++--- 5 files changed, 195 insertions(+), 19 deletions(-) diff --git a/pkg/streamingpromql/engine_test.go b/pkg/streamingpromql/engine_test.go index d4b9bc3d973..981bba7c580 100644 --- a/pkg/streamingpromql/engine_test.go +++ b/pkg/streamingpromql/engine_test.go @@ -2054,6 +2054,13 @@ func TestAnnotations(t *testing.T) { metric{series="incompatible-custom-buckets"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[2 3] buckets:[1]}} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}} ` + nativeHistogramsWithResetHintsMix := ` + metric{reset_hint="unknown"} {{schema:0 sum:0 count:0}}+{{schema:0 sum:5 count:4 buckets:[1 2 1]}}x3 + metric{reset_hint="gauge"} {{schema:0 sum:0 count:0 counter_reset_hint:gauge}}+{{schema:0 sum:5 count:4 buckets:[1 2 1] counter_reset_hint:gauge}}x3 + metric{reset_hint="gauge-unknown"} {{schema:0 sum:0 count:0 counter_reset_hint:gauge}} {{schema:0 sum:0 count:0}}+{{schema:0 sum:5 count:4 buckets:[1 2 1]}}x3 + metric{reset_hint="unknown-gauge"} {{schema:0 sum:0 count:0}}+{{schema:0 sum:5 count:4 buckets:[1 2 1] counter_reset_hint:gauge}}x3 + ` + testCases := map[string]annotationTestCase{ "sum() with float and native histogram at same step": { data: mixedFloatHistogramData, @@ -2073,6 +2080,26 @@ func TestAnnotations(t *testing.T) { expr: `sum(metric{type="histogram"})`, }, + "delta() over a native histogram with unknown CounterResetHint": { + data: nativeHistogramsWithResetHintsMix, + expr: `delta(metric{reset_hint="unknown"}[3m])`, + expectedWarningAnnotations: []string{`PromQL warning: this native histogram metric is not a gauge: "metric" (1:7)`}, + }, + "delta() over a native histogram with gauge CounterResetHint": { + data: nativeHistogramsWithResetHintsMix, + expr: `delta(metric{reset_hint="gauge"}[3m])`, + }, + "delta() with first point having gauge CounterResetHint and last point having unknown CounterResetHint": { + data: nativeHistogramsWithResetHintsMix, + expr: `delta(metric{reset_hint="gauge-unknown"}[3m])`, + expectedWarningAnnotations: []string{`PromQL warning: this native histogram metric is not a gauge: "metric" (1:7)`}, + }, + "delta() with first point having unknown CounterResetHint and last point having gauge CounterResetHint": { + data: nativeHistogramsWithResetHintsMix, + expr: `delta(metric{reset_hint="unknown-gauge"}[3m])`, + expectedWarningAnnotations: []string{`PromQL warning: this native histogram metric is not a gauge: "metric" (1:7)`}, + }, + "stdvar() with only floats": { data: mixedFloatHistogramData, expr: `stdvar(metric{type="float"})`, @@ -2340,6 +2367,31 @@ func TestRateIncreaseAnnotations(t *testing.T) { runAnnotationTests(t, testCases) } +func TestDeltaAnnotations(t *testing.T) { + nativeHistogramsWithGaugeResetHints := ` + metric{series="mix-float-nh"} 10 {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1] counter_reset_hint:gauge}} + metric{series="mixed-exponential-custom-buckets"} {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} + ` + + testCases := map[string]annotationTestCase{ + "delta() 
over series with mixed floats and native histograms": {
+			data: nativeHistogramsWithGaugeResetHints,
+			expr: `delta(metric{series="mix-float-nh"}[1m1s])`,
+			expectedWarningAnnotations: []string{
+				`PromQL warning: encountered a mix of histograms and floats for metric name "metric" (1:7)`,
+			},
+		},
+		"delta() over metric with incompatible schema": {
+			data: nativeHistogramsWithGaugeResetHints,
+			expr: `delta(metric{series="mixed-exponential-custom-buckets"}[1m1s])`,
+			expectedWarningAnnotations: []string{
+				`PromQL warning: vector contains a mix of histograms with exponential and custom buckets schemas for metric name "metric" (1:7)`,
+			},
+		},
+	}
+	runAnnotationTests(t, testCases)
+}
+
 func TestBinaryOperationAnnotations(t *testing.T) {
 	mixedFloatHistogramData := `
 		metric{type="float", series="1"} 0+1x3
@@ -2801,7 +2853,7 @@ func TestCompareVariousMixedMetricsVectorSelectors(t *testing.T) {
 	for _, labels := range labelCombinations {
 		labelRegex := strings.Join(labels, "|")
-		for _, function := range []string{"rate", "increase", "changes", "resets", "deriv", "irate", "idelta"} {
+		for _, function := range []string{"rate", "increase", "changes", "resets", "deriv", "irate", "idelta", "delta"} {
 			expressions = append(expressions, fmt.Sprintf(`%s(series{label=~"(%s)"}[45s])`, function, labelRegex))
 			expressions = append(expressions, fmt.Sprintf(`%s(series{label=~"(%s)"}[1m])`, function, labelRegex))
 			expressions = append(expressions, fmt.Sprintf(`sum(%s(series{label=~"(%s)"}[2m15s]))`, function, labelRegex))
diff --git a/pkg/streamingpromql/functions.go b/pkg/streamingpromql/functions.go
index 99415ccce71..7255b7f9875 100644
--- a/pkg/streamingpromql/functions.go
+++ b/pkg/streamingpromql/functions.go
@@ -364,6 +364,7 @@ var instantVectorFunctionOperatorFactories = map[string]InstantVectorFunctionOpe
 	"cosh": InstantVectorTransformationFunctionOperatorFactory("cosh", functions.Cosh),
 	"count_over_time": FunctionOverRangeVectorOperatorFactory("count_over_time", functions.CountOverTime),
 	"deg": InstantVectorTransformationFunctionOperatorFactory("deg", functions.Deg),
+	"delta": FunctionOverRangeVectorOperatorFactory("delta", functions.Delta),
 	"deriv": FunctionOverRangeVectorOperatorFactory("deriv", functions.Deriv),
 	"exp": InstantVectorTransformationFunctionOperatorFactory("exp", functions.Exp),
 	"floor": InstantVectorTransformationFunctionOperatorFactory("floor", functions.Floor),
diff --git a/pkg/streamingpromql/operators/functions/rate_increase.go b/pkg/streamingpromql/operators/functions/rate_increase.go
index fa5e040424f..f662ffd09bc 100644
--- a/pkg/streamingpromql/operators/functions/rate_increase.go
+++ b/pkg/streamingpromql/operators/functions/rate_increase.go
@@ -29,6 +29,12 @@ var Increase = FunctionOverRangeVectorDefinition{
 	NeedsSeriesNamesForAnnotations: true,
 }
 
+var Delta = FunctionOverRangeVectorDefinition{
+	StepFunc: delta,
+	SeriesMetadataFunction: DropSeriesName,
+	NeedsSeriesNamesForAnnotations: true,
+}
+
 // isRate is true for `rate` function, or false for `increase` function
 func rate(isRate bool) RangeVectorStepFunction {
 	return func(step *types.RangeVectorStepData, rangeSeconds float64, emitAnnotation types.EmitAnnotationFunc) (float64, bool, *histogram.FloatHistogram, error) {
@@ -169,7 +175,7 @@ func floatRate(isRate bool, fCount int, fHead []promql.FPoint, fTail []promql.FP
 	accumulate(fHead)
 	accumulate(fTail)
 
-	val := calculateFloatRate(isRate, rangeStart, rangeEnd, rangeSeconds, firstPoint, lastPoint, delta, fCount)
+	val := calculateFloatRate(true, isRate, rangeStart, 
rangeEnd, rangeSeconds, firstPoint, lastPoint, delta, fCount) return val } @@ -208,7 +214,7 @@ func calculateHistogramRate(isRate bool, rangeStart, rangeEnd int64, rangeSecond // This is based on extrapolatedRate from promql/functions.go. // https://github.com/prometheus/prometheus/pull/13725 has a good explanation of the intended behaviour here. -func calculateFloatRate(isRate bool, rangeStart, rangeEnd int64, rangeSeconds float64, firstPoint, lastPoint promql.FPoint, delta float64, count int) float64 { +func calculateFloatRate(isCounter, isRate bool, rangeStart, rangeEnd int64, rangeSeconds float64, firstPoint, lastPoint promql.FPoint, delta float64, count int) float64 { durationToStart := float64(firstPoint.T-rangeStart) / 1000 durationToEnd := float64(rangeEnd-lastPoint.T) / 1000 @@ -222,7 +228,7 @@ func calculateFloatRate(isRate bool, rangeStart, rangeEnd int64, rangeSeconds fl durationToStart = averageDurationBetweenSamples / 2 } - if delta > 0 && firstPoint.F >= 0 { + if isCounter && delta > 0 && firstPoint.F >= 0 { // Counters cannot be negative. If we have any slope at all // (i.e. delta went up), we can extrapolate the zero point // of the counter. If the duration to the zero point is shorter @@ -272,3 +278,74 @@ func rateSeriesValidator() RangeVectorSeriesValidationFunction { lastCheckedMetricName = metricName } } + +func delta(step *types.RangeVectorStepData, rangeSeconds float64, emitAnnotation types.EmitAnnotationFunc) (float64, bool, *histogram.FloatHistogram, error) { + fHead, fTail := step.Floats.UnsafePoints() + fCount := len(fHead) + len(fTail) + + hHead, hTail := step.Histograms.UnsafePoints() + hCount := len(hHead) + len(hTail) + + if fCount > 0 && hCount > 0 { + // We need either at least two histograms and no floats, or at least two floats and no histograms to calculate a delta. + // Otherwise, emit a warning and drop this sample. 
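+		// (For instance, a hypothetical window holding [float, histogram, float] has
+		// fCount == 2 and hCount == 1, so no value is returned for that step.)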
+ emitAnnotation(annotations.NewMixedFloatsHistogramsWarning) + return 0, false, nil, nil + } + + if fCount >= 2 { + val := floatDelta(fCount, fHead, fTail, step.RangeStart, step.RangeEnd, rangeSeconds) + return val, true, nil, nil + } + + if hCount >= 2 { + val, err := histogramDelta(hCount, hHead, hTail, step.RangeStart, step.RangeEnd, rangeSeconds, emitAnnotation) + if err != nil { + err = NativeHistogramErrorToAnnotation(err, emitAnnotation) + return 0, false, nil, err + } + return 0, false, val, nil + } + + return 0, false, nil, nil +} + +func floatDelta(fCount int, fHead []promql.FPoint, fTail []promql.FPoint, rangeStart int64, rangeEnd int64, rangeSeconds float64) float64 { + firstPoint := fHead[0] + + var lastPoint promql.FPoint + if len(fTail) > 0 { + lastPoint = fTail[len(fTail)-1] + } else { + lastPoint = fHead[len(fHead)-1] + } + + delta := lastPoint.F - firstPoint.F + return calculateFloatRate(false, false, rangeStart, rangeEnd, rangeSeconds, firstPoint, lastPoint, delta, fCount) +} + +func histogramDelta(hCount int, hHead []promql.HPoint, hTail []promql.HPoint, rangeStart int64, rangeEnd int64, rangeSeconds float64, emitAnnotation types.EmitAnnotationFunc) (*histogram.FloatHistogram, error) { + firstPoint := hHead[0] + + var lastPoint promql.HPoint + if len(hTail) > 0 { + lastPoint = hTail[len(hTail)-1] + } else { + lastPoint = hHead[len(hHead)-1] + } + + if firstPoint.H.UsesCustomBuckets() != lastPoint.H.UsesCustomBuckets() { + return nil, histogram.ErrHistogramsIncompatibleSchema + } + + delta, err := lastPoint.H.Copy().Sub(firstPoint.H) + if err != nil { + return nil, err + } + if firstPoint.H.CounterResetHint != histogram.GaugeType || lastPoint.H.CounterResetHint != histogram.GaugeType { + emitAnnotation(annotations.NewNativeHistogramNotGaugeWarning) + } + + val := calculateHistogramRate(false, rangeStart, rangeEnd, rangeSeconds, firstPoint, lastPoint, delta, hCount) + return val, nil +} diff --git a/pkg/streamingpromql/testdata/ours/functions.test b/pkg/streamingpromql/testdata/ours/functions.test index 9105bc01848..6a8e7964415 100644 --- a/pkg/streamingpromql/testdata/ours/functions.test +++ b/pkg/streamingpromql/testdata/ours/functions.test @@ -25,6 +25,13 @@ eval range from 0 to 4m step 1m increase(some_metric_count[1m1s]) {env="test", cluster="eu"} _ 180 183 183 183 {env="test", cluster="us"} _ 240 244 244 244 +# Range query with delta. +eval range from 0 to 4m step 1m delta(some_metric_count[1m1s]) + {env="prod", cluster="eu"} _ 61 61 61 61 + {env="prod", cluster="us"} _ 122 122 122 122 + {env="test", cluster="eu"} _ 183 183 183 183 + {env="test", cluster="us"} _ 244 244 244 244 + # If no series are matched, we shouldn't return any results. eval range from 0 to 4m step 1m rate(some_nonexistent_metric[1m]) # Should return no results. @@ -32,6 +39,9 @@ eval range from 0 to 4m step 1m rate(some_nonexistent_metric[1m]) eval range from 0 to 4m step 1m increase(some_nonexistent_metric[1m]) # Should return no results. +eval range from 0 to 4m step 1m delta(some_nonexistent_metric[1m]) + # Should return no results. + # Ensure we don't include points outside the range of each individual step. 
# #  When evaluating a range selector, if there is no point with timestamp equal to the end of the range,
@@ -622,6 +632,46 @@ eval range from 0 to 8m step 1m irate(metric[3m1s])
 clear
+# Testing delta
+# nh stands for native histogram
+# nhcb stands for native histogram custom bucket
+load 1m
+ metric{case="1 float"} 9
+ metric{case="2 floats"} 1 5
+ metric{case="all floats with reset"} 1 7 1 7 1 7 1 7
+ metric{case="2 floats with missing middle sample"} 1 _ 5
+ metric{case="2 floats with missing 2 middle samples"} 1 _ _ 5
+ metric{case="2 floats with missing last sample"} 1 5 _
+ metric{case="2 floats with NaN middle sample"} 1 NaN 5
+ metric{case="2 floats with NaN 2 middle samples"} 1 NaN NaN 5
+ metric{case="2 floats with NaN last sample"} 1 5 NaN
+ metric{case="2 floats with Inf middle sample"} 1 Inf 5
+ metric{case="2 floats with Inf 2 middle samples"} 1 Inf Inf 5
+ metric{case="2 floats with Inf last sample"} 1 5 Inf
+ metric{case="all NaN"} NaN NaN NaN NaN
+ metric{case="all Inf"} Inf Inf Inf Inf
+ metric{case="all nh up down"} {{schema:3 sum:0 count:0 buckets:[1 1 1] counter_reset_hint:gauge}} {{schema:3 sum:0 count:0 buckets:[4 1 1] counter_reset_hint:gauge}} {{schema:3 sum:0 count:0 buckets:[2 1 1] counter_reset_hint:gauge}}
+ metric{case="all nhcb up down"} {{schema:-53 sum:0 count:0 buckets:[1 1 1] custom_values:[5 10] counter_reset_hint:gauge}} {{schema:-53 sum:0 count:0 buckets:[4 1 1] custom_values:[5 10] counter_reset_hint:gauge}} {{schema:-53 sum:0 count:0 buckets:[1 1 1] custom_values:[5 10] counter_reset_hint:gauge}}
+
+eval range from 0 to 8m step 1m delta(metric[3m1s])
+ {case="2 floats"} _ 6 12.066666666666666 6.066666666666666
+ {case="2 floats with Inf 2 middle samples"} _ +Inf +Inf 4.022222222222222 -Inf -Inf
+ {case="2 floats with Inf last sample"} _ 6 +Inf +Inf +Inf
+ {case="2 floats with Inf middle sample"} _ +Inf 6.033333333333333 6.033333333333333 -Inf
+ {case="2 floats with NaN 2 middle samples"} _ NaN NaN 4.022222222222222 NaN NaN
+ {case="2 floats with NaN last sample"} _ 6 NaN NaN NaN
+ {case="2 floats with NaN middle sample"} _ NaN 6.033333333333333 6.033333333333333 NaN
+ {case="2 floats with missing 2 middle samples"} _ _ _ 4.022222222222222
+ {case="2 floats with missing last sample"} _ 6 12.066666666666666 6.066666666666666
+ {case="2 floats with missing middle sample"} _ _ 6.033333333333333 6.033333333333333
+ {case="all Inf"} _ NaN NaN NaN NaN NaN
+ {case="all NaN"} _ NaN NaN NaN NaN NaN
+ {case="all floats with reset"} _ 9 0 6.033333333333333 -6.033333333333333 6.033333333333333 -6.033333333333333 6.033333333333333 0
+ {case="all nh up down"} _ {{schema:3 counter_reset_hint:gauge buckets:[4.5]}} {{schema:3 counter_reset_hint:gauge buckets:[1.5083333333333333]}} {{schema:3 counter_reset_hint:gauge buckets:[1.5083333333333333]}} {{schema:3 counter_reset_hint:gauge buckets:[-3.033333333333333]}}
+ {case="all nhcb up down"} _ {{schema:-53 custom_values:[5 10] counter_reset_hint:gauge buckets:[4.5]}} {{schema:-53 custom_values:[5 10] counter_reset_hint:gauge}} {{schema:-53 custom_values:[5 10] counter_reset_hint:gauge}} {{schema:-53 custom_values:[5 10] counter_reset_hint:gauge buckets:[-4.55]}}
+
+clear
+
 load 1m
 some_metric_count{env="prod", cluster="eu"} _ _ _ 0+1x4
 some_metric_count{env="prod", cluster="us"} _ _ _ 0+2x4
diff --git a/pkg/streamingpromql/testdata/upstream/functions.test b/pkg/streamingpromql/testdata/upstream/functions.test
index 004ef8d1995..4e716c1185b 100644
--- a/pkg/streamingpromql/testdata/upstream/functions.test
+++ 
b/pkg/streamingpromql/testdata/upstream/functions.test @@ -243,24 +243,20 @@ load 5m http_requests_counter{path="/foo"} {{schema:0 sum:0 count:0 buckets:[0 0 0]}}+{{schema:0 sum:1 count:2 buckets:[1 1 1]}}x5 http_requests_mix{path="/foo"} 0 50 100 {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}} {{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}} -# Unsupported by streaming engine. -# eval instant at 20m delta(http_requests[20m]) -# {path="/foo"} 200 -# {path="/bar"} -200 +eval instant at 20m delta(http_requests[20m]) + {path="/foo"} 200 + {path="/bar"} -200 -# Unsupported by streaming engine. -# eval instant at 20m delta(http_requests_gauge[20m]) -# {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} +eval instant at 20m delta(http_requests_gauge[20m]) + {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} -# Unsupported by streaming engine. -# # delta emits warn annotation for non-gauge histogram types. -# eval_warn instant at 20m delta(http_requests_counter[20m]) -# {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} +# delta emits warn annotation for non-gauge histogram types. +eval_warn instant at 20m delta(http_requests_counter[20m]) + {path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}} -# Unsupported by streaming engine. -# # delta emits warn annotation for mix of histogram and floats. -# eval_warn instant at 20m delta(http_requests_mix[20m]) -# #empty +# delta emits warn annotation for mix of histogram and floats. +eval_warn instant at 20m delta(http_requests_mix[20m]) + #empty clear From 8bedb97dd18aa7b12863caa4c413b52c81cbc8ec Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Fri, 10 Jan 2025 15:35:29 +0100 Subject: [PATCH 07/18] ruler: increase retries backoff limit to 1m (#10403) * ruler: increase retries backoff limit to 1m the previous limit of 2s is too small and doesn't end up spreading out retries for long enough Signed-off-by: Dimitar Dimitrov * Update CHANGELOG.md entry Signed-off-by: Dimitar Dimitrov --------- Signed-off-by: Dimitar Dimitrov --- CHANGELOG.md | 2 +- pkg/ruler/remotequerier.go | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cfd832a09c2..760f3b6fd8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ * [CHANGE] Distributor: OTLP and push handler replace all non-UTF8 characters with the unicode replacement character `\uFFFD` in error messages before propagating them. #10236 * [CHANGE] Querier: pass query matchers to queryable `IsApplicable` hook. #10256 * [CHANGE] Query-frontend: Add `topic` label to `cortex_ingest_storage_strong_consistency_requests_total`, `cortex_ingest_storage_strong_consistency_failures_total`, and `cortex_ingest_storage_strong_consistency_wait_duration_seconds` metrics. #10220 -* [CHANGE] Ruler: cap the rate of retries for remote query evaluation to 170/sec. This is configurable via `-ruler.query-frontend.max-retries-rate`. #10375 +* [CHANGE] Ruler: cap the rate of retries for remote query evaluation to 170/sec. This is configurable via `-ruler.query-frontend.max-retries-rate`. #10375 #10403 * [ENHANCEMENT] Query Frontend: Return server-side `samples_processed` statistics. #10103 * [ENHANCEMENT] Distributor: OTLP receiver now converts also metric metadata. See also https://github.com/prometheus/prometheus/pull/15416. #10168 * [ENHANCEMENT] Distributor: discard float and histogram samples with duplicated timestamps from each timeseries in a request before the request is forwarded to ingesters. 
Discarded samples are tracked by the `cortex_discarded_samples_total` metrics with reason `sample_duplicate_timestamp`. #10145 diff --git a/pkg/ruler/remotequerier.go b/pkg/ruler/remotequerier.go index 4b0a4d7b1d7..b9bd51101dd 100644 --- a/pkg/ruler/remotequerier.go +++ b/pkg/ruler/remotequerier.go @@ -363,8 +363,9 @@ func (q *RemoteQuerier) sendRequest(ctx context.Context, req *httpgrpc.HTTPReque return nil, fmt.Errorf("couldn't reserve a retry token") } // We want to wait at least the time for the backoff, but also don't want to exceed the rate limit. - // All of this is capped to the max backoff, so that we are less likely to overrun into the next evaluation. - retryDelay := max(retry.NextDelay(), min(retryConfig.MaxBackoff, retryReservation.Delay())) + // All of this is capped to 1m, so that we are less likely to overrun into the next evaluation. + // 1m was selected as giving enough time to spread out the retries. + retryDelay := max(retry.NextDelay(), min(time.Minute, retryReservation.Delay())) level.Warn(logger).Log("msg", "failed to remotely evaluate query expression, will retry", "err", err, "retry_delay", retryDelay) select { case <-time.After(retryDelay): From 6c186ae31d4967d96d9d5f1c1d7a78e481cb83dd Mon Sep 17 00:00:00 2001 From: Vladimir Varankin Date: Fri, 10 Jan 2025 15:45:35 +0100 Subject: [PATCH 08/18] mimir-distributed: allow components to override their container image (#10340) * mimir-distributed: allow components to override their container image Signed-off-by: Vladimir Varankin * add test values Signed-off-by: Vladimir Varankin * rebuild assets Signed-off-by: Vladimir Varankin * update changelog Signed-off-by: Vladimir Varankin * update code comments Signed-off-by: Vladimir Varankin * add default values for component images Signed-off-by: Vladimir Varankin * update changelog Signed-off-by: Vladimir Varankin * rebuild assets Signed-off-by: Vladimir Varankin * fix how build script removes default image from generated manifest Signed-off-by: Vladimir Varankin * rebuild assets Signed-off-by: Vladimir Varankin * update values comments Signed-off-by: Vladimir Varankin --------- Signed-off-by: Vladimir Varankin --- .../charts/mimir-distributed/CHANGELOG.md | 1 + ...est-enterprise-component-image-values.yaml | 97 ++++ .../test-oss-component-image-values.yaml | 84 ++++ .../mimir-distributed/templates/_helpers.tpl | 14 +- .../templates/admin-api/admin-api-dep.yaml | 2 +- .../alertmanager/alertmanager-dep.yaml | 2 +- .../alertmanager-statefulset.yaml | 2 +- .../compactor/compactor-statefulset.yaml | 2 +- .../continuous_test/continuous-test-dep.yaml | 2 +- .../distributor/distributor-dep.yaml | 2 +- .../federation-frontend-dep.yaml | 2 +- .../templates/gateway/gateway-dep.yaml | 2 +- .../graphite-querier-dep.yaml | 2 +- .../graphite-write-proxy-dep.yaml | 2 +- .../ingester/ingester-statefulset.yaml | 2 +- .../overrides-exporter-dep.yaml | 2 +- .../templates/querier/querier-dep.yaml | 2 +- .../query-frontend/query-frontend-dep.yaml | 2 +- .../query-scheduler/query-scheduler-dep.yaml | 2 +- .../ruler-querier/ruler-querier-dep.yaml | 2 +- .../ruler-query-frontend-dep.yaml | 2 +- .../ruler-query-scheduler-dep.yaml | 2 +- .../templates/ruler/ruler-dep.yaml | 2 +- .../templates/smoke-test/smoke-test-job.yaml | 2 +- .../store-gateway-statefulset.yaml | 2 +- .../templates/tokengen/tokengen-job.yaml | 2 +- .../helm/charts/mimir-distributed/values.yaml | 110 +++++ operations/helm/scripts/build.sh | 2 +- .../templates/deployment.yaml | 65 +++ .../rollout_operator/templates/role.yaml | 36 ++ 
.../templates/rolebinding.yaml | 19 + .../templates/serviceaccount.yaml | 12 + .../templates/admin-api/admin-api-dep.yaml | 119 +++++ .../templates/admin-api/admin-api-pdb.yaml | 19 + .../templates/admin-api/admin-api-svc.yaml | 30 ++ .../alertmanager/alertmanager-config.yaml | 21 + .../alertmanager/alertmanager-pdb.yaml | 19 + .../alertmanager-statefulset.yaml | 139 ++++++ .../alertmanager-svc-headless.yaml | 36 ++ .../alertmanager/alertmanager-svc.yaml | 30 ++ .../templates/compactor/compactor-pdb.yaml | 19 + .../compactor/compactor-statefulset.yaml | 127 ++++++ .../templates/compactor/compactor-svc.yaml | 30 ++ .../distributor/distributor-dep.yaml | 132 ++++++ .../distributor/distributor-pdb.yaml | 19 + .../distributor/distributor-svc-headless.yaml | 32 ++ .../distributor/distributor-svc.yaml | 30 ++ .../federation-frontend-dep.yaml | 96 ++++ .../federation-frontend-pdb.yaml | 19 + .../federation-frontend-svc.yaml | 29 ++ .../templates/gateway/gateway-dep.yaml | 110 +++++ .../templates/gateway/gateway-pdb.yaml | 19 + .../templates/gateway/gateway-svc.yaml | 29 ++ .../gossip-ring/gossip-ring-svc.yaml | 26 ++ .../templates/ingester/ingester-pdb.yaml | 19 + .../ingester/ingester-statefulset.yaml | 426 +++++++++++++++++ .../ingester/ingester-svc-headless.yaml | 32 ++ .../templates/ingester/ingester-svc.yaml | 105 +++++ .../templates/license-secret.yaml | 13 + .../templates/mimir-config.yaml | 137 ++++++ .../overrides-exporter-dep.yaml | 109 +++++ .../overrides-exporter-pdb.yaml | 19 + .../overrides-exporter-svc.yaml | 29 ++ .../templates/podsecuritypolicy.yaml | 40 ++ .../templates/querier/querier-dep.yaml | 124 +++++ .../templates/querier/querier-pdb.yaml | 19 + .../templates/querier/querier-svc.yaml | 30 ++ .../query-frontend/query-frontend-dep.yaml | 121 +++++ .../query-frontend/query-frontend-pdb.yaml | 19 + .../query-frontend/query-frontend-svc.yaml | 29 ++ .../query-scheduler/query-scheduler-dep.yaml | 113 +++++ .../query-scheduler/query-scheduler-pdb.yaml | 19 + .../query-scheduler-svc-headless.yaml | 32 ++ .../query-scheduler/query-scheduler-svc.yaml | 29 ++ .../mimir-distributed/templates/role.yaml | 16 + .../templates/rolebinding.yaml | 20 + .../templates/ruler/ruler-dep.yaml | 123 +++++ .../templates/ruler/ruler-pdb.yaml | 19 + .../templates/ruler/ruler-svc.yaml | 26 ++ .../templates/runtime-configmap.yaml | 15 + .../templates/serviceaccount.yaml | 13 + .../store-gateway/store-gateway-pdb.yaml | 19 + .../store-gateway-statefulset.yaml | 429 ++++++++++++++++++ .../store-gateway-svc-headless.yaml | 32 ++ .../store-gateway/store-gateway-svc.yaml | 105 +++++ .../templates/tokengen/tokengen-job.yaml | 74 +++ .../templates/deployment.yaml | 65 +++ .../rollout_operator/templates/role.yaml | 36 ++ .../templates/rolebinding.yaml | 19 + .../templates/serviceaccount.yaml | 12 + .../alertmanager/alertmanager-config.yaml | 21 + .../alertmanager/alertmanager-pdb.yaml | 19 + .../alertmanager-statefulset.yaml | 134 ++++++ .../alertmanager-svc-headless.yaml | 36 ++ .../alertmanager/alertmanager-svc.yaml | 30 ++ .../templates/compactor/compactor-pdb.yaml | 19 + .../compactor/compactor-statefulset.yaml | 122 +++++ .../templates/compactor/compactor-svc.yaml | 30 ++ .../distributor/distributor-dep.yaml | 127 ++++++ .../distributor/distributor-pdb.yaml | 19 + .../distributor/distributor-svc-headless.yaml | 32 ++ .../distributor/distributor-svc.yaml | 30 ++ .../gossip-ring/gossip-ring-svc.yaml | 26 ++ .../templates/ingester/ingester-pdb.yaml | 19 + .../ingester/ingester-statefulset.yaml | 411 
+++++++++++++++++ .../ingester/ingester-svc-headless.yaml | 32 ++ .../templates/ingester/ingester-svc.yaml | 105 +++++ .../templates/mimir-config.yaml | 96 ++++ .../templates/nginx/nginx-configmap.yaml | 138 ++++++ .../templates/nginx/nginx-dep.yaml | 92 ++++ .../templates/nginx/nginx-pdb.yaml | 19 + .../templates/nginx/nginx-svc.yaml | 25 + .../overrides-exporter-dep.yaml | 104 +++++ .../overrides-exporter-pdb.yaml | 19 + .../overrides-exporter-svc.yaml | 29 ++ .../templates/podsecuritypolicy.yaml | 40 ++ .../templates/querier/querier-dep.yaml | 119 +++++ .../templates/querier/querier-pdb.yaml | 19 + .../templates/querier/querier-svc.yaml | 30 ++ .../query-frontend/query-frontend-dep.yaml | 116 +++++ .../query-frontend/query-frontend-pdb.yaml | 19 + .../query-frontend/query-frontend-svc.yaml | 29 ++ .../query-scheduler/query-scheduler-dep.yaml | 108 +++++ .../query-scheduler/query-scheduler-pdb.yaml | 19 + .../query-scheduler-svc-headless.yaml | 32 ++ .../query-scheduler/query-scheduler-svc.yaml | 29 ++ .../mimir-distributed/templates/role.yaml | 16 + .../templates/rolebinding.yaml | 20 + .../templates/ruler/ruler-dep.yaml | 118 +++++ .../templates/ruler/ruler-pdb.yaml | 19 + .../templates/ruler/ruler-svc.yaml | 26 ++ .../templates/runtime-configmap.yaml | 15 + .../templates/serviceaccount.yaml | 13 + .../templates/smoke-test/smoke-test-job.yaml | 54 +++ .../store-gateway/store-gateway-pdb.yaml | 19 + .../store-gateway-statefulset.yaml | 414 +++++++++++++++++ .../store-gateway-svc-headless.yaml | 32 ++ .../store-gateway/store-gateway-svc.yaml | 105 +++++ 138 files changed, 7221 insertions(+), 25 deletions(-) create mode 100644 operations/helm/charts/mimir-distributed/ci/offline/test-enterprise-component-image-values.yaml create mode 100644 operations/helm/charts/mimir-distributed/ci/offline/test-oss-component-image-values.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml create mode 100644 
operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-dep.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/license-secret.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/mimir-config.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml create mode 100644 
operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-dep.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/role.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/rolebinding.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/runtime-configmap.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml create mode 100644 
operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml create mode 100644 operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/mimir-config.yaml create mode 100644 
operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-dep.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/role.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/rolebinding.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/runtime-configmap.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/serviceaccount.yaml create mode 100644 
operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml create mode 100644 operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index d342540d313..a3f3f6cc2bc 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -31,6 +31,7 @@ Entries should include a reference to the Pull Request that introduced the chang * [CHANGE] Memcached: Update to Memcached 1.6.34. #10318 * [ENHANCEMENT] Minio: update subchart to v5.4.0. #10346 +* [ENHANCEMENT] Individual mimir components can override their container images via the *.image values. The component's image definitions always override the values set in global `image` or `enterprise.image`. #10340 * [BUGFIX] Fix calculation of `mimir.siToBytes` and use floating point arithmetics. #10044 ## 5.6.0-rc.0 diff --git a/operations/helm/charts/mimir-distributed/ci/offline/test-enterprise-component-image-values.yaml b/operations/helm/charts/mimir-distributed/ci/offline/test-enterprise-component-image-values.yaml new file mode 100644 index 00000000000..527377162ed --- /dev/null +++ b/operations/helm/charts/mimir-distributed/ci/offline/test-enterprise-component-image-values.yaml @@ -0,0 +1,97 @@ +# General setup +enterprise: + enabled: true +minio: + enabled: false + +# Individual components under test +admin_api: + image: + repository: test/admin-api + tag: 1-admin-api + +alertmanager: + image: + repository: test/alertmanager + tag: 1-alertmanager + +compactor: + image: + repository: test/compactor + tag: 1-compactor + +continuous_test: + image: + repository: test/continuous_test + tag: 1-continuous-test + +distributor: + image: + repository: test/distributor + tag: 1-distributor + +federation_frontend: + enabled: true + image: + repository: test/federation_frontend + tag: 1-federation-frontend + +gateway: + image: + repository: test/gateway + tag: 1-gateway + +ingester: + image: + repository: test/ingester + tag: 1-ingester + +overrides_exporter: + image: + repository: test/overrides_exporter + tag: 1-overrides-exporter + +querier: + image: + repository: test/querier + tag: 1-querier + +query_frontend: + image: + repository: test/query_frontend + tag: 1-query-frontend + +query_scheduler: + image: + repository: test/query_scheduler + tag: 1-query-scheduler + +ruler: + image: + repository: test/ruler + tag: 1-ruler + +ruler_querier: + image: + repository: test/ruler_querier + tag: 1-ruler-querier + +ruler_query_frontend: + image: + repository: test/ruler_query_frontend + tag: 1-ruler-query-frontend + +ruler_query_scheduler: + image: + repository: test/ruler_query_scheduler + tag: 1-ruler-query-scheduler + +smoke_test: + image: + repository: test/smoke_test + tag: 1-smoke-test + +store_gateway: + image: + repository: test/store_gateway + tag: 1-store-gateway diff --git 
a/operations/helm/charts/mimir-distributed/ci/offline/test-oss-component-image-values.yaml b/operations/helm/charts/mimir-distributed/ci/offline/test-oss-component-image-values.yaml new file mode 100644 index 00000000000..3d5bb2fcee8 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/ci/offline/test-oss-component-image-values.yaml @@ -0,0 +1,84 @@ +# General setup +minio: + enabled: false + +# Individual components under test +alertmanager: + image: + repository: test/alertmanager + tag: 1-alertmanager + +compactor: + image: + repository: test/compactor + tag: 1-compactor + +continuous_test: + image: + repository: test/continuous_test + tag: 1-continuous-test + +distributor: + image: + repository: test/distributor + tag: 1-distributor + +gateway: + image: + repository: test/gateway + tag: 1-gateway + +ingester: + image: + repository: test/ingester + tag: 1-ingester + +overrides_exporter: + image: + repository: test/overrides_exporter + tag: 1-overrides-exporter + +querier: + image: + repository: test/querier + tag: 1-querier + +query_frontend: + image: + repository: test/query_frontend + tag: 1-query-frontend + +query_scheduler: + image: + repository: test/query_scheduler + tag: 1-query-scheduler + +ruler: + image: + repository: test/ruler + tag: 1-ruler + +ruler_querier: + image: + repository: test/ruler_querier + tag: 1-ruler-querier + +ruler_query_frontend: + image: + repository: test/ruler_query_frontend + tag: 1-ruler-query-frontend + +ruler_query_scheduler: + image: + repository: test/ruler_query_scheduler + tag: 1-ruler-query-scheduler + +smoke_test: + image: + repository: test/smoke_test + tag: 1-smoke-test + +store_gateway: + image: + repository: test/store_gateway + tag: 1-store-gateway diff --git a/operations/helm/charts/mimir-distributed/templates/_helpers.tpl b/operations/helm/charts/mimir-distributed/templates/_helpers.tpl index 34b5a998805..a661a8c84b3 100644 --- a/operations/helm/charts/mimir-distributed/templates/_helpers.tpl +++ b/operations/helm/charts/mimir-distributed/templates/_helpers.tpl @@ -50,10 +50,20 @@ Create chart name and version as used by the chart label. {{- end -}} {{/* -Calculate image name based on whether enterprise features are requested +Build mimir image reference based on whether enterprise features are requested. The component local values always take precedence. +Params: + ctx = . context + component = component name */}} {{- define "mimir.imageReference" -}} -{{- if .Values.enterprise.enabled -}}{{ .Values.enterprise.image.repository }}:{{ .Values.enterprise.image.tag }}{{- else -}}{{ .Values.image.repository }}:{{ .Values.image.tag }}{{- end -}} +{{- $componentSection := include "mimir.componentSectionFromName" . | fromYaml -}} +{{- $image := $componentSection.image | default dict -}} +{{- if .ctx.Values.enterprise.enabled -}} + {{- $image = mustMerge $image .ctx.Values.enterprise.image -}} +{{- else -}} + {{- $image = mustMerge $image .ctx.Values.image -}} +{{- end -}} +{{ $image.repository }}:{{ $image.tag }} {{- end -}} {{/* diff --git a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml index a59e56f8efe..69941aa34e8 100644 --- a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -41,7 +41,7 @@ spec: {{- end }} containers: - name: admin-api - image: "{{ include "mimir.imageReference" . 
}}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "admin-api") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=admin-api" diff --git a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml index f598c211aaf..ccf367000b9 100644 --- a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml @@ -45,7 +45,7 @@ spec: {{- end }} containers: - name: alertmanager - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "alertmanager") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=alertmanager" diff --git a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index b59fd86aaef..68fb3b30f52 100644 --- a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -135,7 +135,7 @@ spec: {{ toYaml .Values.alertmanager.extraContainers | nindent 8 }} {{- end }} - name: alertmanager - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "alertmanager") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=alertmanager" diff --git a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml index c13b2c148cf..b8a451373fd 100644 --- a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -115,7 +115,7 @@ spec: {{ toYaml .Values.compactor.extraContainers | nindent 8 }} {{- end }} - name: compactor - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "compactor") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=compactor" diff --git a/operations/helm/charts/mimir-distributed/templates/continuous_test/continuous-test-dep.yaml b/operations/helm/charts/mimir-distributed/templates/continuous_test/continuous-test-dep.yaml index 6cd407999ee..9e8e41734ac 100644 --- a/operations/helm/charts/mimir-distributed/templates/continuous_test/continuous-test-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/continuous_test/continuous-test-dep.yaml @@ -41,7 +41,7 @@ spec: {{- end }} containers: - name: continuous-test - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . 
"component" "continuous-test") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=continuous-test" diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml index d7b21c3d171..52b855c427e 100644 --- a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -46,7 +46,7 @@ spec: {{- end }} containers: - name: distributor - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "distributor") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=distributor" diff --git a/operations/helm/charts/mimir-distributed/templates/federation-frontend/federation-frontend-dep.yaml b/operations/helm/charts/mimir-distributed/templates/federation-frontend/federation-frontend-dep.yaml index f7fdf266e39..fda0e01403d 100644 --- a/operations/helm/charts/mimir-distributed/templates/federation-frontend/federation-frontend-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/federation-frontend/federation-frontend-dep.yaml @@ -40,7 +40,7 @@ spec: {{- end }} containers: - name: federation-frontend - image: {{ include "mimir.imageReference" . | quote }} + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "federation-frontend") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=federation-frontend" diff --git a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml index 06652007ceb..86c2d89d983 100644 --- a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -52,7 +52,7 @@ spec: containers: {{- if $isGEMGateway }} - name: gateway - image: {{ include "mimir.imageReference" $ | quote }} + image: {{ include "mimir.imageReference" (dict "ctx" $ "component" "gateway") }} imagePullPolicy: {{ $.Values.image.pullPolicy }} args: - "-target=gateway" diff --git a/operations/helm/charts/mimir-distributed/templates/graphite-proxy/graphite-querier/graphite-querier-dep.yaml b/operations/helm/charts/mimir-distributed/templates/graphite-proxy/graphite-querier/graphite-querier-dep.yaml index ea406df65d6..1e0d9180ece 100644 --- a/operations/helm/charts/mimir-distributed/templates/graphite-proxy/graphite-querier/graphite-querier-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/graphite-proxy/graphite-querier/graphite-querier-dep.yaml @@ -44,7 +44,7 @@ spec: {{ toYaml .Values.graphite.querier.extraContainers | nindent 8 }} {{- end }} - name: graphite-querier - image: {{ include "mimir.imageReference" . | quote }} + image: {{ include "mimir.imageReference" (dict "ctx" . 
"component" "graphite-querier") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - -target=graphite-querier diff --git a/operations/helm/charts/mimir-distributed/templates/graphite-proxy/graphite-write-proxy/graphite-write-proxy-dep.yaml b/operations/helm/charts/mimir-distributed/templates/graphite-proxy/graphite-write-proxy/graphite-write-proxy-dep.yaml index 4aae8c5ee8a..d01df659eab 100644 --- a/operations/helm/charts/mimir-distributed/templates/graphite-proxy/graphite-write-proxy/graphite-write-proxy-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/graphite-proxy/graphite-write-proxy/graphite-write-proxy-dep.yaml @@ -44,7 +44,7 @@ spec: {{ toYaml .Values.graphite.write_proxy.extraContainers | nindent 8 }} {{- end }} - name: graphite-write-proxy - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "graphite-write-proxy") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - -target=graphite-write-proxy diff --git a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml index e6e27a223e9..d13a949f46b 100644 --- a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -125,7 +125,7 @@ spec: {{ toYaml .Values.ingester.extraContainers | nindent 8 }} {{- end }} - name: ingester - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "ingester") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=ingester" diff --git a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index ac8dc5e7525..06d4a382978 100644 --- a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -41,7 +41,7 @@ spec: {{- end }} containers: - name: overrides-exporter - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "overrides-exporter") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=overrides-exporter" diff --git a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml index ce5f4a825fa..168bf6b9e71 100644 --- a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml @@ -45,7 +45,7 @@ spec: {{- end }} containers: - name: querier - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . 
"component" "querier") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=querier" diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index 96f0e911478..31981b6b284 100644 --- a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -46,7 +46,7 @@ spec: {{- end }} containers: - name: query-frontend - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "query-frontend") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=query-frontend" diff --git a/operations/helm/charts/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml b/operations/helm/charts/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml index 380282ecf80..783c55e3b1e 100644 --- a/operations/helm/charts/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml @@ -40,7 +40,7 @@ spec: {{- end }} containers: - name: query-scheduler - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "query-scheduler") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=query-scheduler" diff --git a/operations/helm/charts/mimir-distributed/templates/ruler-querier/ruler-querier-dep.yaml b/operations/helm/charts/mimir-distributed/templates/ruler-querier/ruler-querier-dep.yaml index 6e17bcdc49e..6d5407a6622 100644 --- a/operations/helm/charts/mimir-distributed/templates/ruler-querier/ruler-querier-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ruler-querier/ruler-querier-dep.yaml @@ -45,7 +45,7 @@ spec: {{- end }} containers: - name: ruler-querier - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "ruler-querier") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=querier" diff --git a/operations/helm/charts/mimir-distributed/templates/ruler-query-frontend/ruler-query-frontend-dep.yaml b/operations/helm/charts/mimir-distributed/templates/ruler-query-frontend/ruler-query-frontend-dep.yaml index 163bee57b84..73902433018 100644 --- a/operations/helm/charts/mimir-distributed/templates/ruler-query-frontend/ruler-query-frontend-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ruler-query-frontend/ruler-query-frontend-dep.yaml @@ -46,7 +46,7 @@ spec: {{- end }} containers: - name: ruler-query-frontend - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . 
"component" "ruler-query-frontend") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=query-frontend" diff --git a/operations/helm/charts/mimir-distributed/templates/ruler-query-scheduler/ruler-query-scheduler-dep.yaml b/operations/helm/charts/mimir-distributed/templates/ruler-query-scheduler/ruler-query-scheduler-dep.yaml index fd08e7327f7..f656e8a0fe7 100644 --- a/operations/helm/charts/mimir-distributed/templates/ruler-query-scheduler/ruler-query-scheduler-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ruler-query-scheduler/ruler-query-scheduler-dep.yaml @@ -40,7 +40,7 @@ spec: {{- end }} containers: - name: ruler-query-scheduler - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "ruler-query-scheduler") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=query-scheduler" diff --git a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml index bba58d3f7f1..c90d46cc9f0 100644 --- a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -43,7 +43,7 @@ spec: {{- end }} containers: - name: ruler - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "ruler") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=ruler" diff --git a/operations/helm/charts/mimir-distributed/templates/smoke-test/smoke-test-job.yaml b/operations/helm/charts/mimir-distributed/templates/smoke-test/smoke-test-job.yaml index ba0157c2a39..55e8adbef81 100644 --- a/operations/helm/charts/mimir-distributed/templates/smoke-test/smoke-test-job.yaml +++ b/operations/helm/charts/mimir-distributed/templates/smoke-test/smoke-test-job.yaml @@ -37,7 +37,7 @@ spec: {{- toYaml .Values.smoke_test.initContainers | nindent 8 }} containers: - name: smoke-test - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "smoke-test") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=continuous-test" diff --git a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index 5768bfc3e37..43bf4f3f025 100644 --- a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -125,7 +125,7 @@ spec: {{ toYaml .Values.store_gateway.extraContainers | nindent 8 }} {{- end }} - name: store-gateway - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . 
"component" "store-gateway") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=store-gateway" diff --git a/operations/helm/charts/mimir-distributed/templates/tokengen/tokengen-job.yaml b/operations/helm/charts/mimir-distributed/templates/tokengen/tokengen-job.yaml index b8d92686bf8..e157997e7a0 100644 --- a/operations/helm/charts/mimir-distributed/templates/tokengen/tokengen-job.yaml +++ b/operations/helm/charts/mimir-distributed/templates/tokengen/tokengen-job.yaml @@ -40,7 +40,7 @@ spec: {{- toYaml .Values.tokengenJob.initContainers | nindent 8 }} containers: - name: tokengen - image: "{{ include "mimir.imageReference" . }}" + image: {{ include "mimir.imageReference" (dict "ctx" . "component" "tokengen") }} imagePullPolicy: {{ .Values.image.pullPolicy }} args: - "-target=tokengen" diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index 4a4f9982f06..ff223192cdd 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -551,6 +551,12 @@ alertmanager: # E.g. if 'replicas' is set to 4 and there are 3 zones, then 4/3=1.33 and after rounding up it means 2 pods per zone are started. replicas: 1 + # -- Allows to override the container image of the alertmanager component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + statefulSet: enabled: true @@ -814,6 +820,12 @@ distributor: # -- Whether to render the manifests related to the distributor component. enabled: true + # -- Allows to override the container image of the distributor component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + # Setting it to null will produce a deployment without replicas set, allowing you to use autoscaling with the deployment replicas: 1 @@ -927,6 +939,12 @@ ingester: # -- Whether to render the manifests related to the ingester component. enabled: true + # -- Allows to override the container image of the ingester component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + # -- Total number of replicas for the ingester across all availability zones # If ingester.zoneAwareReplication.enabled=false, this number is taken as is. # Otherwise each zone starts `ceil(replicas / number_of_zones)` number of pods. @@ -1152,6 +1170,13 @@ ingester: overrides_exporter: enabled: true + + # -- Allows to override the container image of the overrides-exporter component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + replicas: 1 annotations: {} @@ -1241,6 +1266,13 @@ overrides_exporter: ruler: enabled: true + + # -- Allows to override the container image of the ruler component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + replicas: 1 # -- [Experimental] Configure autoscaling via KEDA (https://keda.sh). This requires having @@ -1363,6 +1395,12 @@ ruler: # -- Only deployed if .Values.ruler.remoteEvaluationDedicatedQueryPath ruler_querier: + # -- Allows to override the container image of the ruler-querier component. 
+ # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + replicas: 2 # -- [Experimental] Configure autoscaling via KEDA (https://keda.sh). This requires having @@ -1481,6 +1519,12 @@ ruler_querier: # -- Only deployed if .Values.ruler.remoteEvaluationDedicatedQueryPath ruler_query_frontend: + # -- Allows to override the container image of the ruler-query-frontend component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + # Setting it to null will produce a deployment without replicas set, allowing you to use autoscaling with the deployment replicas: 1 @@ -1591,6 +1635,12 @@ ruler_query_frontend: # -- Only deployed if .Values.ruler.remoteEvaluationDedicatedQueryPath ruler_query_scheduler: + # -- Allows to override the container image of the ruler-query-scheduler component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + replicas: 2 service: @@ -1680,6 +1730,12 @@ querier: # -- Whether to render the manifests related to the querier component. enabled: true + # -- Allows to override the container image of the querier component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + replicas: 2 # -- [Experimental] Configure autoscaling via KEDA (https://keda.sh). This requires having @@ -1811,6 +1867,12 @@ query_frontend: # -- Whether to render the manifests related to the query-frontend component. enabled: true + # -- Allows to override the container image of the query-frontend component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + # Setting it to null will produce a deployment without replicas set, allowing you to use autoscaling with the deployment replicas: 1 @@ -1921,6 +1983,13 @@ query_frontend: query_scheduler: enabled: true + + # -- Allows to override the container image of the query-scheduler component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + replicas: 2 service: @@ -2010,6 +2079,12 @@ store_gateway: # -- Whether to render the manifests related to the store-gateway component. enabled: true + # -- Allows to override the container image of the store-gateway component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + # -- Total number of replicas for the store-gateway across all availability zones # If store_gateway.zoneAwareReplication.enabled=false, this number is taken as is. # Otherwise each zone starts `ceil(replicas / number_of_zones)` number of pods. @@ -2227,6 +2302,12 @@ compactor: # -- Whether to render the manifests related to the compactor component. enabled: true + # -- Allows to override the container image of the compactor component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. 
+ image: + # repository: grafana/mimir + # tag: 2.15.0 + replicas: 1 service: @@ -3258,6 +3339,13 @@ gateway: # The value of this field is ignored when gateway.enabled=false. enabledNonEnterprise: false + # -- Allows to override the container image of the gateway component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + # Only applies to the GEM gateway. Use gateway.nginx.image for the nginx gateway. + image: + # repository: grafana/mimir-enterprise + # tag: 2.15.0 + # -- Number of replicas for the Deployment replicas: 1 @@ -3990,6 +4078,12 @@ admin_api: # -- Whether to render the manifests related to the admin-api component. enabled: true + # -- Allows to override the container image of the admin-api component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/enterprise-metrics + # tag: 2.15.0 + replicas: 1 annotations: {} @@ -4466,6 +4560,12 @@ federation_frontend: # -- Specifies whether the federation-frontend should be enabled enabled: false + # -- Allows to override the container image of the federation-frontend component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 + # -- Specifies whether other components should be deployed. This is an easy way to deploy the resources necessary for a standalone # federation-frontend without the rest of the GEM components. disableOtherComponents: false @@ -4559,6 +4659,11 @@ federation_frontend: # that writing and reading metrics works. Currently not supported for # installations using GEM token-based authentication. smoke_test: + # -- Allows to override the container image of the smoke-test component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir + # tag: 2.15.0 # -- Controls the backoffLimit on the Kubernetes Job. The Job is marked as failed after that many retries. backoffLimit: 5 # The image section has been removed as continuous test is now a module of the regular Mimir image. @@ -4577,6 +4682,11 @@ smoke_test: # https://grafana.com/docs/mimir/latest/manage/tools/mimir-continuous-test/ continuous_test: enabled: false + # -- Allows to override the container image of the continuous-test component. + # When set it takes precedence over what is defined in global "image" or "enterprise.image" sections. + image: + # repository: grafana/mimir-continuous-test + # tag: 2.15.0 # -- Number of replicas to start of continuous test replicas: 1 # The image section has been removed as continuous test is now a module of the regular Mimir image. 
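The values above give every component an optional per-component image block. As a usage illustration only (a minimal sketch: the release name, the "grafana" Helm repo alias, and the tag below are hypothetical, not taken from this patch), a values file that overrides the image just for the ingester could look like this, with every other component falling back to the global "image" section:

    # pin-ingester.yaml -- override the image only for the ingester;
    # all other components keep using the global "image" section
    ingester:
      image:
        repository: grafana/mimir   # hypothetical repository for illustration
        tag: 2.15.0                 # hypothetical tag for illustration

and then, assuming the "grafana" chart repo alias is configured:

    helm upgrade --install mimir grafana/mimir-distributed -f pin-ingester.yaml

Note that the templates above now render the include unquoted (image: {{ include "mimir.imageReference" ... }} instead of image: "{{ ... }}"), which is why the build.sh hunk that follows relaxes the sed filter with an optional quote (\"?) when stripping image lines from the golden test manifests.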
diff --git a/operations/helm/scripts/build.sh b/operations/helm/scripts/build.sh index 6178746e95c..e7b9b6922f3 100755 --- a/operations/helm/scripts/build.sh +++ b/operations/helm/scripts/build.sh @@ -83,7 +83,7 @@ function generate_manifests() { helm template "${ARGS[@]}" 1>/dev/null cp -r "${INTERMEDIATE_OUTPUT_DIR}" "${OUTPUT_DIR}" rm "${OUTPUT_DIR}/${CHART_NAME}/templates/values-for-rego-tests.yaml" - find "${OUTPUT_DIR}/${CHART_NAME}/templates" -type f -print0 | xargs -0 "${SED}" -E -i -- "/^\s+(checksum\/(alertmanager-fallback-)?config|(helm.sh\/)?chart|app.kubernetes.io\/version|image: \"grafana\/(mimir|mimir-continuous-test|enterprise-metrics)):/d" + find "${OUTPUT_DIR}/${CHART_NAME}/templates" -type f -print0 | xargs -0 "${SED}" -E -i -- "/^\s+(checksum\/(alertmanager-fallback-)?config|(helm.sh\/)?chart|app.kubernetes.io\/version|image: \"?grafana\/(mimir|mimir-continuous-test|enterprise-metrics)):/d" } for FILEPATH in $TESTS; do diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml new file mode 100644 index 00000000000..e1595094cd4 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -0,0 +1,65 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-enterprise-component-image-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.22.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/version: "v0.22.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-enterprise-component-image-values + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-enterprise-component-image-values + spec: + serviceAccountName: test-enterprise-component-image-values-rollout-operator + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: rollout-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + image: "grafana/rollout-operator:v0.22.0" + imagePullPolicy: IfNotPresent + args: + - -kubernetes.namespace=citestns + ports: + - name: http-metrics + containerPort: 8001 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml new file mode 100644 index 00000000000..46509b45938 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml @@ -0,0 
+1,36 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: test-enterprise-component-image-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.22.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/version: "v0.22.0" + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - get + - watch + - delete +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - list + - get + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - update diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml new file mode 100644 index 00000000000..67653825db3 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: test-enterprise-component-image-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.22.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/version: "v0.22.0" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: test-enterprise-component-image-values-rollout-operator +subjects: +- kind: ServiceAccount + name: test-enterprise-component-image-values-rollout-operator diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml new file mode 100644 index 00000000000..eb9d559580c --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-enterprise-component-image-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.22.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/version: "v0.22.0" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml new file mode 100644 index 00000000000..109731e92d8 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -0,0 +1,119 @@ +--- +# Source: mimir-distributed/templates/admin-api/admin-api-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {} + labels: + 
app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: admin-api + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: test-enterprise-component-image-values-mimir-admin-api + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: admin-api + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admin-api + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: admin-api + image: test/admin-api:1-admin-api + imagePullPolicy: IfNotPresent + args: + - "-target=admin-api" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: license + mountPath: /license + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 10m + memory: 32Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: admin-api + terminationGracePeriodSeconds: 60 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-pdb.yaml new file mode 100644 index 00000000000..40dc10a76b2 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/admin-api/admin-api-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-admin-api + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + 
app.kubernetes.io/component: admin-api + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: admin-api + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml new file mode 100644 index 00000000000..df4124e608e --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/admin-api/admin-api-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-admin-api + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: admin-api + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: admin-api diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml new file mode 100644 index 00000000000..97f402115a6 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml @@ -0,0 +1,21 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-enterprise-component-image-values-mimir-alertmanager-fallback-config + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +data: + alertmanager_fallback_config.yaml: | + receivers: + - name: default-receiver + route: + receiver: default-receiver diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml new file mode 100644 index 00000000000..e434d33fdb5 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: 
alertmanager + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: alertmanager + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml new file mode 100644 index 00000000000..5d337ede35e --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -0,0 +1,139 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-enterprise-component-image-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: alertmanager + updateStrategy: + type: RollingUpdate + serviceName: test-enterprise-component-image-values-mimir-alertmanager + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: alertmanager + terminationGracePeriodSeconds: 900 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: tmp + emptyDir: {} + - name: active-queries + emptyDir: {} + - name: alertmanager-fallback-config + configMap: + name: test-enterprise-component-image-values-mimir-alertmanager-fallback-config + containers: + - name: alertmanager + image: test/alertmanager:1-alertmanager + imagePullPolicy: IfNotPresent + args: + - "-target=alertmanager" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # The Prometheus HTTP client used to send alerts has a hard-coded idle + # timeout of 5 minutes; therefore, the server timeout for Alertmanager + # needs to be higher to avoid 
connections being closed abruptly. + - "-server.http-idle-timeout=6m" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: alertmanager-fallback-config + mountPath: /configs/ + - name: tmp + mountPath: /tmp + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 10m + memory: 32Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml new file mode 100644 index 00000000000..009147fcd48 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml @@ -0,0 +1,36 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-alertmanager-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + - port: 9094 + protocol: TCP + name: cluster + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: alertmanager diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 00000000000..4a1f092f07c --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + 
app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: alertmanager diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml new file mode 100644 index 00000000000..11fe6c5fb6c --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: compactor + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: compactor + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 00000000000..e2b5660fc4e --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,127 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-enterprise-component-image-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: compactor + updateStrategy: + type: RollingUpdate + serviceName: test-enterprise-component-image-values-mimir-compactor + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: compactor + terminationGracePeriodSeconds: 900 + volumes: 
+ - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: compactor + image: test/compactor:1-compactor + imagePullPolicy: IfNotPresent + args: + - "-target=compactor" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml new file mode 100644 index 00000000000..d8cbaff8b6f --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: compactor diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml new file mode 100644 index 00000000000..cfaa75dea88 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -0,0 +1,132 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-enterprise-component-image-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + # If replicas is 
not a number (when using a values file it's float64, when using a --set arg it's int64) and is false (i.e. null), don't set it + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: distributor + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: distributor + image: test/distributor:1-distributor + imagePullPolicy: IfNotPresent + args: + - "-target=distributor" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # When write requests go through distributors via gRPC, we want gRPC clients to re-resolve the distributors' DNS + # endpoint before the distributor process is terminated, in order to avoid any failures during graceful shutdown. + # To achieve it, we set a shutdown delay greater than the gRPC max connection age. + - "-server.grpc.keepalive.max-connection-age=60s" + - "-server.grpc.keepalive.max-connection-age-grace=5m" + - "-server.grpc.keepalive.max-connection-idle=1m" + - "-shutdown-delay=90s" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "8" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: distributor + terminationGracePeriodSeconds: 100 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml new file mode 100644 index 00000000000..b8cdcbb162e --- /dev/null +++ 
b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: distributor + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: distributor + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 00000000000..cb1a21c5f43 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-distributor-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: distributor diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml new file mode 100644 index 00000000000..78a04e1366b --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: distributor diff --git 
a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-dep.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-dep.yaml new file mode 100644 index 00000000000..87b11e35d8d --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-dep.yaml @@ -0,0 +1,96 @@ +--- +# Source: mimir-distributed/templates/federation-frontend/federation-frontend-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-enterprise-component-image-values-mimir-federation-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: federation-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: federation-frontend + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: federation-frontend + annotations: + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: federation-frontend + image: test/federation_frontend:1-federation-frontend + imagePullPolicy: IfNotPresent + args: + - "-target=federation-frontend" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-pdb.yaml new file mode 100644 index 00000000000..ac77b85949a --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/federation-frontend/federation-frontend-pdb.yaml +apiVersion: policy/v1beta1 +kind: 
PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-federation-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: federation-frontend + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: federation-frontend + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-svc.yaml new file mode 100644 index 00000000000..7ddc1e09459 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/federation-frontend/federation-frontend-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/federation-frontend/federation-frontend-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-federation-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: federation-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: federation-frontend diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml new file mode 100644 index 00000000000..53bc4a7dd8b --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -0,0 +1,110 @@ +--- +# Source: mimir-distributed/templates/gateway/gateway-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {} + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: gateway + app.kubernetes.io/managed-by: Helm + name: test-enterprise-component-image-values-mimir-gateway + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: gateway + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: gateway + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: gateway + image: 
test/gateway:1-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: license + mountPath: /license + - name: tmp + mountPath: /data + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: gateway + terminationGracePeriodSeconds: 30 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: tmp + emptyDir: {} + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-pdb.yaml new file mode 100644 index 00000000000..a3ebb8924ce --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/gateway/gateway-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-gateway + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: gateway + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: gateway + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml new file mode 100644 index 00000000000..100d5994d5e --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/gateway/gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-gateway + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: gateway + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 80 + protocol: TCP + name: http-metrics + targetPort: http-metrics + 
- port: 8080 + protocol: TCP + name: legacy-http-metrics + targetPort: http-metrics + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: gateway diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml new file mode 100644 index 00000000000..b4a4f458c10 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml @@ -0,0 +1,26 @@ +--- +# Source: mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-gossip-ring + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: gossip-ring + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + appProtocol: tcp + protocol: TCP + targetPort: 7946 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/part-of: memberlist diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml new file mode 100644 index 00000000000..d1a88d7818f --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-ingester + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml new file mode 100644 index 00000000000..731536ce1e0 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,426 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-enterprise-component-image-values-mimir-ingester-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + 
annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-a + updateStrategy: + type: OnDelete + serviceName: test-enterprise-component-image-values-mimir-ingester-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + terminationGracePeriodSeconds: 1200 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + image: test/ingester:1-ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-a" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: license + mountPath: /license + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + + - name: "GOMAXPROCS" + value: "4" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-enterprise-component-image-values-mimir-ingester-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" 
+spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-b + updateStrategy: + type: OnDelete + serviceName: test-enterprise-component-image-values-mimir-ingester-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + terminationGracePeriodSeconds: 1200 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + image: test/ingester:1-ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-b" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: license + mountPath: /license + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + + - name: "GOMAXPROCS" + value: "4" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-enterprise-component-image-values-mimir-ingester-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + 
matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-c + updateStrategy: + type: OnDelete + serviceName: test-enterprise-component-image-values-mimir-ingester-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + terminationGracePeriodSeconds: 1200 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + image: test/ingester:1-ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-c" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: license + mountPath: /license + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + + - name: "GOMAXPROCS" + value: "4" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 00000000000..cbb8a66132a --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-ingester-headless + 
labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml new file mode 100644 index 00000000000..44649c7a1b8 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml @@ -0,0 +1,105 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-ingester-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-a +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-ingester-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-b +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-ingester-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: 
http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-c diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/license-secret.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/license-secret.yaml new file mode 100644 index 00000000000..b9ac6dec040 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/license-secret.yaml @@ -0,0 +1,13 @@ +--- +# Source: mimir-distributed/templates/license-secret.yaml +apiVersion: v1 +kind: Secret +metadata: + name: test-enterprise-component-image-values-mimir-license + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + license.jwt: Tk9UQVZBTElETElDRU5TRQ== diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/mimir-config.yaml new file mode 100644 index 00000000000..c6f1c320694 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/mimir-config.yaml @@ -0,0 +1,137 @@ +--- +# Source: mimir-distributed/templates/mimir-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-enterprise-component-image-values-mimir-config + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + mimir.yaml: | + + activity_tracker: + filepath: /active-query-tracker/activity.log + admin_api: + leader_election: + enabled: true + ring: + kvstore: + store: memberlist + admin_client: + storage: null + alertmanager: + data_dir: /data + enable_api: true + external_url: /alertmanager + fallback_config_file: /configs/alertmanager_fallback_config.yaml + auth: + admin: + pass_access_policy_name: true + pass_token_name: true + type: enterprise + blocks_storage: + backend: s3 + bucket_store: + sync_dir: /data/tsdb-sync + tsdb: + dir: /data/tsdb + head_compaction_interval: 15m + wal_replay_concurrency: 3 + cluster_name: test-enterprise-component-image-values + compactor: + compaction_interval: 30m + data_dir: /data + deletion_delay: 2h + first_level_compaction_wait_period: 25m + max_closing_blocks_concurrency: 2 + max_opening_blocks_concurrency: 4 + sharding_ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + wait_stability_min_duration: 1m + symbols_flushers_concurrency: 4 + distributor: + ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + frontend: + log_query_request_headers: X-Access-Policy-Name,X-Token-Name + parallelize_shardable_queries: true + scheduler_address: test-enterprise-component-image-values-mimir-query-scheduler-headless.citestns.svc:9095 + frontend_worker: + grpc_client_config: + max_send_msg_size: 419430400 + scheduler_address: test-enterprise-component-image-values-mimir-query-scheduler-headless.citestns.svc:9095 + gateway: + proxy: + admin_api: + url: http://test-enterprise-component-image-values-mimir-admin-api.citestns.svc:8080 + alertmanager: + url: 
http://test-enterprise-component-image-values-mimir-alertmanager-headless.citestns.svc:8080 + compactor: + url: http://test-enterprise-component-image-values-mimir-compactor.citestns.svc:8080 + default: + url: http://test-enterprise-component-image-values-mimir-admin-api.citestns.svc:8080 + distributor: + url: dns:///test-enterprise-component-image-values-mimir-distributor-headless.citestns.svc.cluster.local.:9095 + ingester: + url: http://test-enterprise-component-image-values-mimir-ingester-headless.citestns.svc:8080 + query_frontend: + url: http://test-enterprise-component-image-values-mimir-federation-frontend.citestns.svc:8080 + ruler: + url: http://test-enterprise-component-image-values-mimir-ruler.citestns.svc:8080 + store_gateway: + url: http://test-enterprise-component-image-values-mimir-store-gateway-headless.citestns.svc:8080 + ingester: + ring: + final_sleep: 0s + heartbeat_period: 2m + heartbeat_timeout: 10m + num_tokens: 512 + tokens_file_path: /data/tokens + unregister_on_shutdown: false + zone_awareness_enabled: true + ingester_client: + grpc_client_config: + max_recv_msg_size: 104857600 + max_send_msg_size: 104857600 + instrumentation: + distributor_client: + address: dns:///test-enterprise-component-image-values-mimir-distributor-headless.citestns.svc.cluster.local.:9095 + enabled: true + license: + path: /license/license.jwt + limits: + max_cache_freshness: 10m + max_query_parallelism: 240 + max_total_query_length: 12000h + memberlist: + abort_if_cluster_join_fails: false + compression_enabled: false + join_members: + - dns+test-enterprise-component-image-values-mimir-gossip-ring.citestns.svc.cluster.local.:7946 + querier: + max_concurrent: 16 + query_scheduler: + max_outstanding_requests_per_tenant: 800 + ruler: + alertmanager_url: dnssrvnoa+http://_http-metrics._tcp.test-enterprise-component-image-values-mimir-alertmanager-headless.citestns.svc.cluster.local./alertmanager + enable_api: true + rule_path: /data + runtime_config: + file: /var/mimir/runtime.yaml + store_gateway: + sharding_ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + kvstore: + prefix: multi-zone/ + tokens_file_path: /data/tokens + unregister_on_shutdown: false + wait_stability_min_duration: 1m + zone_awareness_enabled: true + usage_stats: + installation_mode: helm diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml new file mode 100644 index 00000000000..a8d1282f5ec --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -0,0 +1,109 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {} + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + name: test-enterprise-component-image-values-mimir-overrides-exporter + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: overrides-exporter + strategy: + rollingUpdate: + maxSurge: 15% + 
maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: overrides-exporter + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: overrides-exporter + image: test/overrides_exporter:1-overrides-exporter + imagePullPolicy: IfNotPresent + args: + - "-target=overrides-exporter" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + + terminationGracePeriodSeconds: 30 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml new file mode 100644 index 00000000000..eed973e61cc --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-overrides-exporter + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: overrides-exporter + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml new file mode 100644 index 00000000000..c3280ec94cb --- /dev/null +++ 
b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-overrides-exporter + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: overrides-exporter diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml new file mode 100644 index 00000000000..5b3b2bb67da --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml @@ -0,0 +1,40 @@ +--- +# Source: mimir-distributed/templates/podsecuritypolicy.yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: test-enterprise-component-image-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + annotations: + "seccomp.security.alpha.kubernetes.io/allowedProfileNames": runtime/default +spec: + privileged: false + allowPrivilegeEscalation: false + volumes: + - 'configMap' + - 'emptyDir' + - 'persistentVolumeClaim' + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-dep.yaml new file mode 100644 index 00000000000..bd67935962a --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -0,0 +1,124 @@ +--- +# Source: mimir-distributed/templates/querier/querier-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-enterprise-component-image-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + # If replicas is not a number (when using values file it's float64, when using --set arg it's int64) and is false (i.e.
null) don't set it + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: querier + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + annotations: + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: querier + image: test/querier:1-querier + imagePullPolicy: IfNotPresent + args: + - "-target=querier" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "5000" + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: querier + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml new file mode 100644 index 00000000000..1d9ef1d3ea1 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/querier/querier-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: querier + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + 
app.kubernetes.io/component: querier + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-svc.yaml new file mode 100644 index 00000000000..2acc06a8c41 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/querier/querier-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/querier/querier-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: querier diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 00000000000..811184d1b6a --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,121 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-enterprise-component-image-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + # If replicas is not a number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-frontend + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: query-frontend + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: query-frontend + image: test/query_frontend:1-query-frontend + imagePullPolicy: IfNotPresent + args: + - "-target=query-frontend" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # Reduce the likelihood of queries hitting terminated query-frontends.
+ - "-server.grpc.keepalive.max-connection-age=30s" + - "-shutdown-delay=90s" + volumeMounts: + - name: runtime-config + mountPath: /var/mimir + - name: license + mountPath: /license + - name: config + mountPath: /etc/mimir + - name: storage + mountPath: /data + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "5000" + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-frontend + terminationGracePeriodSeconds: 390 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml new file mode 100644 index 00000000000..3cc10f1d032 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-frontend + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 00000000000..d9a20951cae --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: 
query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-frontend diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml new file mode 100644 index 00000000000..9d7487750c1 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml @@ -0,0 +1,113 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-enterprise-component-image-values-mimir-query-scheduler + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + annotations: + {} +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-scheduler + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: query-scheduler + annotations: + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: query-scheduler + image: test/query_scheduler:1-query-scheduler + imagePullPolicy: IfNotPresent + args: + - "-target=query-scheduler" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: runtime-config + mountPath: /var/mimir + - name: license + mountPath: /license + - name: config + mountPath: /etc/mimir + - name: storage + mountPath: /data + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-scheduler + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: 
test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml new file mode 100644 index 00000000000..2d136583d1d --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-query-scheduler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-scheduler + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml new file mode 100644 index 00000000000..ea1f8ab06d8 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-query-scheduler-headless + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-scheduler diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml new file mode 100644 index 00000000000..731b4ed0b60 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-query-scheduler + namespace: 
"citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + annotations: + {} +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: query-scheduler diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/role.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/role.yaml new file mode 100644 index 00000000000..ba845ed067f --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/role.yaml @@ -0,0 +1,16 @@ +--- +# Source: mimir-distributed/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: test-enterprise-component-image-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [test-enterprise-component-image-values-mimir] diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/rolebinding.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/rolebinding.yaml new file mode 100644 index 00000000000..08f0b64559c --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +--- +# Source: mimir-distributed/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: test-enterprise-component-image-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: test-enterprise-component-image-values-mimir +subjects: +- kind: ServiceAccount + name: test-enterprise-component-image-values-mimir +- kind: ServiceAccount + name: test-enterprise-component-image-values-mimir-distributed diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml new file mode 100644 index 00000000000..dbd8e4c8ce6 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -0,0 +1,123 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-enterprise-component-image-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + 
app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ruler + strategy: + rollingUpdate: + maxSurge: 50% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: ruler + image: test/ruler:1-ruler + imagePullPolicy: IfNotPresent + args: + - "-target=ruler" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-distributor.remote-timeout=10s" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ruler + terminationGracePeriodSeconds: 600 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml new file mode 100644 index 00000000000..e74a1f125dd --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ruler + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ruler + maxUnavailable: 1 diff --git 
a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml new file mode 100644 index 00000000000..07d47fb5104 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml @@ -0,0 +1,26 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: ruler diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/runtime-configmap.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/runtime-configmap.yaml new file mode 100644 index 00000000000..c4f76d8639f --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/runtime-configmap.yaml @@ -0,0 +1,15 @@ +--- +# Source: mimir-distributed/templates/runtime-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-enterprise-component-image-values-mimir-runtime + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + runtime.yaml: | + + {} diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/serviceaccount.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/serviceaccount.yaml new file mode 100644 index 00000000000..4f6eb1c9e24 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: mimir-distributed/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-enterprise-component-image-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml new file mode 100644 index 00000000000..581eb019fe5 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-enterprise-component-image-values-mimir-store-gateway + labels: + 
app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + maxUnavailable: 1 diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 00000000000..6aff82b3737 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,429 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-enterprise-component-image-values-mimir-store-gateway-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-a + updateStrategy: + type: OnDelete + serviceName: test-enterprise-component-image-values-mimir-store-gateway-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + terminationGracePeriodSeconds: 120 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + image: test/store_gateway:1-store-gateway + imagePullPolicy: IfNotPresent + args: + - 
"-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-a" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-enterprise-component-image-values-mimir-store-gateway-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-b + updateStrategy: + type: OnDelete + serviceName: test-enterprise-component-image-values-mimir-store-gateway-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + terminationGracePeriodSeconds: 120 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + image: 
test/store_gateway:1-store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-b" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-enterprise-component-image-values-mimir-store-gateway-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-c + updateStrategy: + type: OnDelete + serviceName: test-enterprise-component-image-values-mimir-store-gateway-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + terminationGracePeriodSeconds: 120 + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: runtime-config + configMap: + name: test-enterprise-component-image-values-mimir-runtime + - name: 
active-queries + emptyDir: {} + containers: + - name: store-gateway + image: test/store_gateway:1-store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-c" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 00000000000..34f19b05ba7 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-store-gateway-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 00000000000..0c95e394276 --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,105 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-store-gateway-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: 
"store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-a +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-store-gateway-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-b +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-enterprise-component-image-values-mimir-store-gateway-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-c diff --git a/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml new file mode 100644 index 00000000000..d2c3f1d2f1b --- /dev/null +++ b/operations/helm/tests/test-enterprise-component-image-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml @@ -0,0 +1,74 @@ +--- +# Source: mimir-distributed/templates/tokengen/tokengen-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: test-enterprise-component-image-values-mimir-tokengen + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/component: tokengen + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": post-install + namespace: "citestns" +spec: + backoffLimit: 6 + completions: 1 + parallelism: 1 + selector: + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-enterprise-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: tokengen 
+ annotations: + namespace: "citestns" + spec: + serviceAccountName: test-enterprise-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: tokengen + imagePullPolicy: IfNotPresent + args: + - "-target=tokengen" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: license + mountPath: /license + - name: active-queries + mountPath: /active-query-tracker + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + restartPolicy: OnFailure + volumes: + - name: config + configMap: + name: test-enterprise-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: license + secret: + secretName: test-enterprise-component-image-values-mimir-license + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml new file mode 100644 index 00000000000..0b7d7a2b6bc --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/deployment.yaml @@ -0,0 +1,65 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-oss-component-image-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.22.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/version: "v0.22.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + minReadySeconds: 10 + selector: + matchLabels: + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-oss-component-image-values + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-oss-component-image-values + spec: + serviceAccountName: test-oss-component-image-values-rollout-operator + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: rollout-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + image: "grafana/rollout-operator:v0.22.0" + imagePullPolicy: IfNotPresent + args: + - -kubernetes.namespace=citestns + ports: + - name: http-metrics + containerPort: 8001 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml new file mode 100644 index 00000000000..828ff453d14 --- /dev/null +++ 
b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/role.yaml @@ -0,0 +1,36 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: test-oss-component-image-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.22.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/version: "v0.22.0" + app.kubernetes.io/managed-by: Helm +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - get + - watch + - delete +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - list + - get + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - update diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml new file mode 100644 index 00000000000..b3b3d797e8c --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: test-oss-component-image-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.22.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/version: "v0.22.0" + app.kubernetes.io/managed-by: Helm +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: test-oss-component-image-values-rollout-operator +subjects: +- kind: ServiceAccount + name: test-oss-component-image-values-rollout-operator diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml new file mode 100644 index 00000000000..fe28e0fe9ec --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +--- +# Source: mimir-distributed/charts/rollout_operator/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-oss-component-image-values-rollout-operator + labels: + helm.sh/chart: rollout-operator-0.22.0 + app.kubernetes.io/name: rollout-operator + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/version: "v0.22.0" + app.kubernetes.io/managed-by: Helm diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml new file mode 100644 index 00000000000..d2fe8cc0414 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-config.yaml @@ -0,0 +1,21 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-config.yaml +apiVersion: v1 +kind: ConfigMap 
+metadata: + name: test-oss-component-image-values-mimir-alertmanager-fallback-config + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +data: + alertmanager_fallback_config.yaml: | + receivers: + - name: default-receiver + route: + receiver: default-receiver diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml new file mode 100644 index 00000000000..bbdd875e461 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml new file mode 100644 index 00000000000..e920243061c --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -0,0 +1,134 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-component-image-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager + updateStrategy: + type: RollingUpdate + serviceName: test-oss-component-image-values-mimir-alertmanager + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - 
maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager + terminationGracePeriodSeconds: 900 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: tmp + emptyDir: {} + - name: active-queries + emptyDir: {} + - name: alertmanager-fallback-config + configMap: + name: test-oss-component-image-values-mimir-alertmanager-fallback-config + containers: + - name: alertmanager + image: test/alertmanager:1-alertmanager + imagePullPolicy: IfNotPresent + args: + - "-target=alertmanager" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # Prometheus HTTP client used to send alerts has a hard-coded idle + # timeout of 5 minutes, therefore the server timeout for Alertmanager + # needs to be higher to avoid connections being closed abruptly. + - "-server.http-idle-timeout=6m" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: alertmanager-fallback-config + mountPath: /configs/ + - name: tmp + mountPath: /tmp + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 10m + memory: 32Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml new file mode 100644 index 00000000000..7ffd5da64f2 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml @@ -0,0 +1,36 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-alertmanager-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + - port: 9094 + protocol: TCP + name: cluster + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml 
b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml new file mode 100644 index 00000000000..92934396128 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/alertmanager/alertmanager-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-alertmanager + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: alertmanager diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml new file mode 100644 index 00000000000..74021686812 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: compactor + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: compactor + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml new file mode 100644 index 00000000000..bfc4cb9b3b6 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -0,0 +1,122 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-component-image-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: compactor + updateStrategy: + type: RollingUpdate + serviceName: test-oss-component-image-values-mimir-compactor + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: 
+ accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: compactor + terminationGracePeriodSeconds: 900 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: compactor + image: test/compactor:1-compactor + imagePullPolicy: IfNotPresent + args: + - "-target=compactor" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml new file mode 100644 index 00000000000..7ba6f5e5f6c --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/compactor/compactor-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-compactor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: compactor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: compactor diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml new file mode 100644 
index 00000000000..f80729797e1 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -0,0 +1,127 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-oss-component-image-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + # If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: distributor + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: distributor + image: test/distributor:1-distributor + imagePullPolicy: IfNotPresent + args: + - "-target=distributor" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # When write requests go through distributors via gRPC, we want gRPC clients to re-resolve the distributors DNS + # endpoint before the distributor process is terminated, in order to avoid any failures during graceful shutdown. + # To achieve it, we set a shutdown delay greater than the gRPC max connection age. 
+ - "-server.grpc.keepalive.max-connection-age=60s" + - "-server.grpc.keepalive.max-connection-age-grace=5m" + - "-server.grpc.keepalive.max-connection-idle=1m" + - "-shutdown-delay=90s" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "8" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: distributor + terminationGracePeriodSeconds: 100 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml new file mode 100644 index 00000000000..1f90b3e896a --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: distributor + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: distributor + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml new file mode 100644 index 00000000000..426fa189f4d --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-distributor-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: 
memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: distributor diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml new file mode 100644 index 00000000000..6301fc0ee2c --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/distributor/distributor-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-distributor + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: distributor + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: distributor diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml new file mode 100644 index 00000000000..dc6b36b3d05 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml @@ -0,0 +1,26 @@ +--- +# Source: mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-gossip-ring + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: gossip-ring + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + appProtocol: tcp + protocol: TCP + targetPort: 7946 + publishNotReadyAddresses: true + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/part-of: memberlist diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml new file mode 100644 index 00000000000..b289c35d00b --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: 
test-oss-component-image-values-mimir-ingester + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml new file mode 100644 index 00000000000..480fddeb714 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -0,0 +1,411 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-component-image-values-mimir-ingester-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-a + updateStrategy: + type: OnDelete + serviceName: test-oss-component-image-values-mimir-ingester-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + terminationGracePeriodSeconds: 1200 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + image: test/ingester:1-ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-a" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: 
storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + + - name: "GOMAXPROCS" + value: "4" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-component-image-values-mimir-ingester-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-b + updateStrategy: + type: OnDelete + serviceName: test-oss-component-image-values-mimir-ingester-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + terminationGracePeriodSeconds: 1200 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + image: test/ingester:1-ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-b" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP 
+ livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + + - name: "GOMAXPROCS" + value: "4" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" +--- +# Source: mimir-distributed/templates/ingester/ingester-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-component-image-values-mimir-ingester-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-c + updateStrategy: + type: OnDelete + serviceName: test-oss-component-image-values-mimir-ingester-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + terminationGracePeriodSeconds: 1200 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: ingester + image: test/ingester:1-ingester + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-ingester.ring.instance-availability-zone=zone-c" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + 
readOnlyRootFilesystem: true + env: + + - name: "GOMAXPROCS" + value: "4" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml new file mode 100644 index 00000000000..e61a54bdaf5 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-ingester-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml new file mode 100644 index 00000000000..83926e19197 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml @@ -0,0 +1,105 @@ +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-ingester-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-a" + rollout-group: ingester + zone: zone-a + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-a +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-ingester-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-b" + rollout-group: ingester + zone: zone-b + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: 
test-oss-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-b +--- +# Source: mimir-distributed/templates/ingester/ingester-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-ingester-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "ingester-zone-c" + rollout-group: ingester + zone: zone-c + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ingester + rollout-group: ingester + zone: zone-c diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/mimir-config.yaml new file mode 100644 index 00000000000..d7ae12eb1e4 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/mimir-config.yaml @@ -0,0 +1,96 @@ +--- +# Source: mimir-distributed/templates/mimir-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-oss-component-image-values-mimir-config + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + mimir.yaml: | + + activity_tracker: + filepath: /active-query-tracker/activity.log + alertmanager: + data_dir: /data + enable_api: true + external_url: /alertmanager + fallback_config_file: /configs/alertmanager_fallback_config.yaml + blocks_storage: + backend: s3 + bucket_store: + sync_dir: /data/tsdb-sync + tsdb: + dir: /data/tsdb + head_compaction_interval: 15m + wal_replay_concurrency: 3 + compactor: + compaction_interval: 30m + data_dir: /data + deletion_delay: 2h + first_level_compaction_wait_period: 25m + max_closing_blocks_concurrency: 2 + max_opening_blocks_concurrency: 4 + sharding_ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + wait_stability_min_duration: 1m + symbols_flushers_concurrency: 4 + distributor: + ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + frontend: + parallelize_shardable_queries: true + scheduler_address: test-oss-component-image-values-mimir-query-scheduler-headless.citestns.svc:9095 + frontend_worker: + grpc_client_config: + max_send_msg_size: 419430400 + scheduler_address: test-oss-component-image-values-mimir-query-scheduler-headless.citestns.svc:9095 + ingester: + ring: + final_sleep: 0s + heartbeat_period: 2m + heartbeat_timeout: 10m + num_tokens: 512 + tokens_file_path: /data/tokens + unregister_on_shutdown: false + zone_awareness_enabled: true + ingester_client: + grpc_client_config: + max_recv_msg_size: 104857600 + max_send_msg_size: 104857600 + limits: + max_cache_freshness: 10m + max_query_parallelism: 240 + max_total_query_length: 12000h + memberlist: + abort_if_cluster_join_fails: false + compression_enabled: false + join_members: + - dns+test-oss-component-image-values-mimir-gossip-ring.citestns.svc.cluster.local.:7946 + querier: + max_concurrent: 16 + query_scheduler: + max_outstanding_requests_per_tenant: 800 + 
ruler: + alertmanager_url: dnssrvnoa+http://_http-metrics._tcp.test-oss-component-image-values-mimir-alertmanager-headless.citestns.svc.cluster.local./alertmanager + enable_api: true + rule_path: /data + runtime_config: + file: /var/mimir/runtime.yaml + store_gateway: + sharding_ring: + heartbeat_period: 1m + heartbeat_timeout: 4m + kvstore: + prefix: multi-zone/ + tokens_file_path: /data/tokens + unregister_on_shutdown: false + wait_stability_min_duration: 1m + zone_awareness_enabled: true + usage_stats: + installation_mode: helm diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml new file mode 100644 index 00000000000..583c7ccf2ff --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml @@ -0,0 +1,138 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-oss-component-image-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + nginx.conf: | + worker_processes 5; ## Default: 1 + error_log /dev/stderr error; + pid /tmp/nginx.pid; + worker_rlimit_nofile 8192; + + events { + worker_connections 4096; ## Default: 1024 + } + + http { + client_body_temp_path /tmp/client_temp; + proxy_temp_path /tmp/proxy_temp_path; + fastcgi_temp_path /tmp/fastcgi_temp; + uwsgi_temp_path /tmp/uwsgi_temp; + scgi_temp_path /tmp/scgi_temp; + + default_type application/octet-stream; + log_format main '$remote_addr - $remote_user [$time_local] $status ' + '"$request" $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /dev/stderr main; + + sendfile on; + tcp_nopush on; + proxy_http_version 1.1; + resolver kube-dns.kube-system.svc.cluster.local.; + + # Ensure that X-Scope-OrgID is always present, default to the no_auth_tenant for backwards compatibility when multi-tenancy was turned off. + map $http_x_scope_orgid $ensured_x_scope_orgid { + default $http_x_scope_orgid; + "" "anonymous"; + } + + map $http_x_scope_orgid $has_multiple_orgid_headers { + default 0; + "~^.+,.+$" 1; + } + + proxy_read_timeout 300; + server { + listen 8080; + listen [::]:8080; + + if ($has_multiple_orgid_headers = 1) { + return 400 'Sending multiple X-Scope-OrgID headers is not allowed. 
Use a single header with | as separator instead.'; + } + + location = / { + return 200 'OK'; + auth_basic off; + } + + proxy_set_header X-Scope-OrgID $ensured_x_scope_orgid; + + # Distributor endpoints + location /distributor { + set $distributor test-oss-component-image-values-mimir-distributor-headless.citestns.svc.cluster.local.; + proxy_pass http://$distributor:8080$request_uri; + } + location = /api/v1/push { + set $distributor test-oss-component-image-values-mimir-distributor-headless.citestns.svc.cluster.local.; + proxy_pass http://$distributor:8080$request_uri; + } + location /otlp/v1/metrics { + set $distributor test-oss-component-image-values-mimir-distributor-headless.citestns.svc.cluster.local.; + proxy_pass http://$distributor:8080$request_uri; + } + + # Alertmanager endpoints + location /alertmanager { + set $alertmanager test-oss-component-image-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + location = /multitenant_alertmanager/status { + set $alertmanager test-oss-component-image-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + location = /multitenant_alertmanager/configs { + set $alertmanager test-oss-component-image-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + location = /api/v1/alerts { + set $alertmanager test-oss-component-image-values-mimir-alertmanager-headless.citestns.svc.cluster.local.; + proxy_pass http://$alertmanager:8080$request_uri; + } + + # Ruler endpoints + location /prometheus/config/v1/rules { + set $ruler test-oss-component-image-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + location /prometheus/api/v1/rules { + set $ruler test-oss-component-image-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + + location /prometheus/api/v1/alerts { + set $ruler test-oss-component-image-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + location = /ruler/ring { + set $ruler test-oss-component-image-values-mimir-ruler.citestns.svc.cluster.local.; + proxy_pass http://$ruler:8080$request_uri; + } + + # Rest of /prometheus goes to the query frontend + location /prometheus { + set $query_frontend test-oss-component-image-values-mimir-query-frontend.citestns.svc.cluster.local.; + proxy_pass http://$query_frontend:8080$request_uri; + } + + # Buildinfo endpoint can go to any component + location = /api/v1/status/buildinfo { + set $query_frontend test-oss-component-image-values-mimir-query-frontend.citestns.svc.cluster.local.; + proxy_pass http://$query_frontend:8080$request_uri; + } + + # Compactor endpoint for uploading blocks + location /api/v1/upload/block/ { + set $compactor test-oss-component-image-values-mimir-compactor.citestns.svc.cluster.local.; + proxy_pass http://$compactor:8080$request_uri; + } + } + } diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml new file mode 100644 index 00000000000..ad188dbb376 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml @@ -0,0 +1,92 @@ +--- +# Source: 
mimir-distributed/templates/nginx/nginx-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-oss-component-image-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: nginx + template: + metadata: + annotations: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: nginx + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + terminationGracePeriodSeconds: 30 + containers: + - name: nginx + image: docker.io/nginxinc/nginx-unprivileged:1.27-alpine + imagePullPolicy: IfNotPresent + ports: + - name: http-metric + containerPort: 8080 + protocol: TCP + readinessProbe: + httpGet: + path: / + port: http-metric + initialDelaySeconds: 15 + timeoutSeconds: 1 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - name: config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: tmp + mountPath: /tmp + - name: docker-entrypoint-d-override + mountPath: /docker-entrypoint.d + resources: + {} + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: nginx + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-nginx + - name: tmp + emptyDir: {} + - name: docker-entrypoint-d-override + emptyDir: {} diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml new file mode 100644 index 00000000000..3f27848fd81 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: nginx + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml new file mode 100644 index 00000000000..f3ec2dc1124 --- /dev/null +++ 
b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml @@ -0,0 +1,25 @@ +--- +# Source: mimir-distributed/templates/nginx/nginx-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-nginx + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: nginx + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - name: http-metric + port: 80 + targetPort: http-metric + protocol: TCP + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: nginx diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml new file mode 100644 index 00000000000..5b5b33fd284 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -0,0 +1,104 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + {} + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + name: test-oss-component-image-values-mimir-overrides-exporter + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: overrides-exporter + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: overrides-exporter + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: overrides-exporter + image: test/overrides_exporter:1-overrides-exporter + imagePullPolicy: IfNotPresent + args: + - "-target=overrides-exporter" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + + terminationGracePeriodSeconds: 30 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + 
path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml new file mode 100644 index 00000000000..328e8896470 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-overrides-exporter + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: overrides-exporter + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml new file mode 100644 index 00000000000..825a07ebc80 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-overrides-exporter + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: overrides-exporter + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: overrides-exporter diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml new file mode 100644 index 00000000000..95e21da7c3c --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml @@ -0,0 +1,40 @@ +--- +# Source: mimir-distributed/templates/podsecuritypolicy.yaml +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: test-oss-component-image-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + annotations: + "seccomp.security.alpha.kubernetes.io/allowedProfileNames": runtime/default +spec: + privileged: false + allowPrivilegeEscalation: false + 
volumes: + - 'configMap' + - 'emptyDir' + - 'persistentVolumeClaim' + - 'secret' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: true + requiredDropCapabilities: + - ALL diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-dep.yaml new file mode 100644 index 00000000000..5bd3b44a4bc --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -0,0 +1,119 @@ +--- +# Source: mimir-distributed/templates/querier/querier-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-oss-component-image-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + # If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. null) don't set it + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: querier + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + annotations: + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: querier + image: test/querier:1-querier + imagePullPolicy: IfNotPresent + args: + - "-target=querier" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "5000" + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: querier + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config 
+ items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml new file mode 100644 index 00000000000..881686d988f --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/querier/querier-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: querier + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: querier + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-svc.yaml new file mode 100644 index 00000000000..77ef468eb14 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/querier/querier-svc.yaml @@ -0,0 +1,30 @@ +--- +# Source: mimir-distributed/templates/querier/querier-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-querier + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: querier + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: querier diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml new file mode 100644 index 00000000000..93ca5581f5f --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -0,0 +1,116 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-oss-component-image-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + # If replicas is not number (when using values file it's float64, when using --set arg it's int64) and is false (i.e. 
null) don't set it + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-frontend + strategy: + rollingUpdate: + maxSurge: 15% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: query-frontend + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: query-frontend + image: test/query_frontend:1-query-frontend + imagePullPolicy: IfNotPresent + args: + - "-target=query-frontend" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + # Reduce the likelihood of queries hitting terminated query-frontends. + - "-server.grpc.keepalive.max-connection-age=30s" + - "-shutdown-delay=90s" + volumeMounts: + - name: runtime-config + mountPath: /var/mimir + - name: config + mountPath: /etc/mimir + - name: storage + mountPath: /data + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "5000" + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-frontend + terminationGracePeriodSeconds: 390 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml new file mode 100644 index 00000000000..8a5ec3ba994 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-frontend + maxUnavailable: 1 diff --git 
a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml new file mode 100644 index 00000000000..6ee2d39eb6c --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/query-frontend/query-frontend-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-query-frontend + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-frontend + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-frontend diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml new file mode 100644 index 00000000000..a2e4a0374ad --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml @@ -0,0 +1,108 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-dep.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-oss-component-image-values-mimir-query-scheduler + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + annotations: + {} +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-scheduler + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: query-scheduler + annotations: + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: query-scheduler + image: test/query_scheduler:1-query-scheduler + imagePullPolicy: IfNotPresent + args: + - "-target=query-scheduler" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + volumeMounts: + - name: runtime-config + mountPath: /var/mimir + - name: config + mountPath: /etc/mimir + - name: storage + mountPath: /data + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + 
requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-scheduler + terminationGracePeriodSeconds: 180 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml new file mode 100644 index 00000000000..e46f3e177f6 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-query-scheduler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-scheduler + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml new file mode 100644 index 00000000000..a2336b1a3cd --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-query-scheduler-headless + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + prometheus.io/service-monitor: "false" + annotations: + {} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-scheduler diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml 
b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml new file mode 100644 index 00000000000..d7613e0de90 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml @@ -0,0 +1,29 @@ +--- +# Source: mimir-distributed/templates/query-scheduler/query-scheduler-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-query-scheduler + namespace: "citestns" + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-scheduler + app.kubernetes.io/managed-by: Helm + annotations: + {} +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: query-scheduler diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/role.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/role.yaml new file mode 100644 index 00000000000..5f172a2fcd0 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/role.yaml @@ -0,0 +1,16 @@ +--- +# Source: mimir-distributed/templates/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: test-oss-component-image-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [test-oss-component-image-values-mimir] diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/rolebinding.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/rolebinding.yaml new file mode 100644 index 00000000000..c12e2fc8a5a --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +--- +# Source: mimir-distributed/templates/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: test-oss-component-image-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: test-oss-component-image-values-mimir +subjects: +- kind: ServiceAccount + name: test-oss-component-image-values-mimir +- kind: ServiceAccount + name: test-oss-component-image-values-mimir-distributed diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml new file mode 100644 index 00000000000..cde4a53234e --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -0,0 +1,118 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-dep.yaml 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-oss-component-image-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ruler + strategy: + rollingUpdate: + maxSurge: 50% + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + containers: + - name: ruler + image: test/ruler:1-ruler + imagePullPolicy: IfNotPresent + args: + - "-target=ruler" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-distributor.remote-timeout=10s" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 45 + resources: + requests: + cpu: 100m + memory: 128Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ruler + terminationGracePeriodSeconds: 600 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: storage + emptyDir: {} + - name: active-queries + emptyDir: {} diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml new file mode 100644 index 00000000000..532a295c686 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ruler + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + 
selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ruler + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml new file mode 100644 index 00000000000..6086f93a8a8 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml @@ -0,0 +1,26 @@ +--- +# Source: mimir-distributed/templates/ruler/ruler-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-ruler + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ruler + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: ruler diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/runtime-configmap.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/runtime-configmap.yaml new file mode 100644 index 00000000000..19682e22bcb --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/runtime-configmap.yaml @@ -0,0 +1,15 @@ +--- +# Source: mimir-distributed/templates/runtime-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-oss-component-image-values-mimir-runtime + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +data: + runtime.yaml: | + + {} diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/serviceaccount.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/serviceaccount.yaml new file mode 100644 index 00000000000..9e073752729 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: mimir-distributed/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-oss-component-image-values-mimir + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml new file mode 100644 index 00000000000..1f4c8bdc10d --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml @@ -0,0 +1,54 @@ +--- +# Source: mimir-distributed/templates/smoke-test/smoke-test-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: test-oss-component-image-values-mimir-smoke-test + labels: + 
app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: smoke-test + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test + namespace: "citestns" +spec: + backoffLimit: 5 + completions: 1 + parallelism: 1 + selector: + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: smoke-test + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + initContainers: + [] + containers: + - name: smoke-test + image: test/smoke_test:1-smoke-test + imagePullPolicy: IfNotPresent + args: + - "-target=continuous-test" + - "-activity-tracker.filepath=" + - "-tests.smoke-test" + - "-tests.write-endpoint=http://test-oss-component-image-values-mimir-nginx.citestns.svc:80" + - "-tests.read-endpoint=http://test-oss-component-image-values-mimir-nginx.citestns.svc:80/prometheus" + - "-tests.tenant-id=" + - "-tests.write-read-series-test.num-series=1000" + - "-tests.write-read-series-test.max-query-age=48h" + - "-server.http-listen-port=8080" + volumeMounts: + restartPolicy: OnFailure + volumes: diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml new file mode 100644 index 00000000000..cb659670790 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml @@ -0,0 +1,19 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: test-oss-component-image-values-mimir-store-gateway + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/managed-by: Helm + namespace: "citestns" +spec: + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + maxUnavailable: 1 diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml new file mode 100644 index 00000000000..6c10994cefe --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -0,0 +1,414 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-component-image-values-mimir-store-gateway-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + rollout-max-unavailable: "50" + namespace: 
"citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-a + updateStrategy: + type: OnDelete + serviceName: test-oss-component-image-values-mimir-store-gateway-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + terminationGracePeriodSeconds: 120 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + image: test/store_gateway:1-store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-a" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-component-image-values-mimir-store-gateway-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir 
+ app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-b + updateStrategy: + type: OnDelete + serviceName: test-oss-component-image-values-mimir-store-gateway-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + terminationGracePeriodSeconds: 120 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + image: test/store_gateway:1-store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-b" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-component-image-values-mimir-store-gateway-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + rollout-max-unavailable: "50" + namespace: "citestns" +spec: + podManagementPolicy: OrderedReady + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: 
store-gateway + zone: zone-c + updateStrategy: + type: OnDelete + serviceName: test-oss-component-image-values-mimir-store-gateway-headless + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: storage + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "2Gi" + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + namespace: "citestns" + spec: + serviceAccountName: test-oss-component-image-values-mimir + securityContext: + fsGroup: 10001 + runAsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + seccompProfile: + type: RuntimeDefault + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + terminationGracePeriodSeconds: 120 + volumes: + - name: config + configMap: + name: test-oss-component-image-values-mimir-config + items: + - key: "mimir.yaml" + path: "mimir.yaml" + - name: runtime-config + configMap: + name: test-oss-component-image-values-mimir-runtime + - name: active-queries + emptyDir: {} + containers: + - name: store-gateway + image: test/store_gateway:1-store-gateway + imagePullPolicy: IfNotPresent + args: + - "-target=store-gateway" + - "-config.expand-env=true" + - "-config.file=/etc/mimir/mimir.yaml" + - "-store-gateway.sharding-ring.instance-availability-zone=zone-c" + volumeMounts: + - name: config + mountPath: /etc/mimir + - name: runtime-config + mountPath: /var/mimir + - name: storage + mountPath: "/data" + - name: active-queries + mountPath: /active-query-tracker + ports: + - name: http-metrics + containerPort: 8080 + protocol: TCP + - name: grpc + containerPort: 9095 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + livenessProbe: + null + readinessProbe: + httpGet: + path: /ready + port: http-metrics + initialDelaySeconds: 60 + resources: + requests: + cpu: 100m + memory: 512Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + env: + - name: "GOMAXPROCS" + value: "5" + - name: "GOMEMLIMIT" + value: "536870912" + - name: "JAEGER_REPORTER_MAX_QUEUE_SIZE" + value: "1000" diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml new file mode 100644 index 00000000000..f24129b582e --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml @@ -0,0 +1,32 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-store-gateway-headless + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + 
prometheus.io/service-monitor: "false" + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway diff --git a/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml new file mode 100644 index 00000000000..952256a0806 --- /dev/null +++ b/operations/helm/tests/test-oss-component-image-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml @@ -0,0 +1,105 @@ +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-store-gateway-zone-a + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-a" + rollout-group: store-gateway + zone: zone-a + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-a +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-store-gateway-zone-b + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-b" + rollout-group: store-gateway + zone: zone-b + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-b +--- +# Source: mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-component-image-values-mimir-store-gateway-zone-c + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-component-image-values + app.kubernetes.io/component: store-gateway + app.kubernetes.io/part-of: memberlist + app.kubernetes.io/managed-by: Helm + name: "store-gateway-zone-c" + rollout-group: store-gateway + zone: zone-c + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + ports: + - port: 8080 + protocol: TCP + name: http-metrics + targetPort: http-metrics + - port: 9095 + protocol: TCP + name: grpc + targetPort: grpc + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: 
test-oss-component-image-values + app.kubernetes.io/component: store-gateway + rollout-group: store-gateway + zone: zone-c From 91e5fb1cc3d59a493f278ef1d92422488e0df73d Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Fri, 10 Jan 2025 10:53:13 -0500 Subject: [PATCH 09/18] Update to latest mimir-prometheus (#10400) * Update to latest mimir-prometheus Also implement new rule concurrency interface from https://github.com/prometheus/prometheus/pull/15681 * MQE: Pull new functions test and fix _over_time functions Same fix as https://github.com/prometheus/prometheus/pull/15711 and similar to what was done during the last sync: https://github.com/grafana/mimir/pull/10383 * Linting * MQE: Use `.Any()` as suggested in PR * Add CHANGELOG * Remove sequentialRules, not needed A nil slice will run rules sequentially * Address PR comments * Add test to check that group isn't mutated --- CHANGELOG.md | 3 + go.mod | 2 +- go.sum | 4 +- pkg/ruler/fixtures/rules_chain.yaml | 22 ++ pkg/ruler/fixtures/rules_indeterminates.yaml | 18 ++ .../fixtures/rules_multiple_independent.yaml | 15 ++ .../rules_topological_sort_needed.json | 245 ++++++++++++++++++ pkg/ruler/rule_concurrency.go | 104 ++++++-- pkg/ruler/rule_concurrency_test.go | 222 +++++++++------- pkg/ruler/rule_query_consistency_test.go | 4 +- .../operators/functions/range_vectors.go | 29 ++- .../testdata/ours/functions.test | 4 +- .../testdata/upstream/functions.test | 86 +++++- .../testdata/upstream/native_histograms.test | 36 +++ .../prometheus/prometheus/promql/functions.go | 75 +++--- .../prometheus/promql/promqltest/README.md | 22 +- .../prometheus/promql/promqltest/test.go | 51 ++-- .../promql/promqltest/testdata/functions.test | 78 +++++- .../testdata/native_histograms.test | 33 +++ .../prometheus/prometheus/rules/alerting.go | 53 +++- .../prometheus/prometheus/rules/group.go | 94 ++++--- .../prometheus/prometheus/rules/manager.go | 63 ++++- .../prometheus/prometheus/rules/recording.go | 54 +++- .../prometheus/prometheus/rules/rule.go | 14 +- .../prometheus/prometheus/scrape/target.go | 4 +- .../storage/remote/metadata_watcher.go | 2 +- .../storage/remote/queue_manager.go | 2 +- .../prometheus/tsdb/index/postings.go | 5 + .../prometheus/tsdb/wlog/watcher.go | 2 +- .../prometheus/prometheus/web/api/v1/api.go | 57 +++- vendor/modules.txt | 4 +- 31 files changed, 1104 insertions(+), 303 deletions(-) create mode 100644 pkg/ruler/fixtures/rules_chain.yaml create mode 100644 pkg/ruler/fixtures/rules_indeterminates.yaml create mode 100644 pkg/ruler/fixtures/rules_multiple_independent.yaml create mode 100644 pkg/ruler/fixtures/rules_topological_sort_needed.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 760f3b6fd8c..fc5f0ffde44 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ * [ENHANCEMENT] Ingester: More efficient CPU/memory utilization-based read request limiting. #10325 * [ENHANCEMENT] Dashboards: Add Query-Scheduler <-> Querier Inflight Requests row to Query Reads and Remote Ruler reads dashboards. #10290 * [ENHANCEMENT] OTLP: In addition to the flag `-distributor.otel-created-timestamp-zero-ingestion-enabled` there is now `-distributor.otel-start-time-quiet-zero` to convert OTel start timestamps to Prometheus QuietZeroNaNs. This flag is to make the change rollout safe between Ingesters and Distributors. #10238 +* [ENHANCEMENT] Ruler: When rule concurrency is enabled for a rule group, its rules will now be reordered and run in batches based on their dependencies. 
This increases the number of rules that can potentially run concurrently. Note that the global and tenant-specific limits still apply. #10400 * [BUGFIX] Distributor: Use a boolean to track changes while merging the ReplicaDesc components, rather than comparing the objects directly. #10185 * [BUGFIX] Querier: fix timeout responding to query-frontend when response size is very close to `-querier.frontend-client.grpc-max-send-msg-size`. #10154 * [BUGFIX] Query-frontend and querier: show warning/info annotations in some cases where they were missing (if a lazy querier was used). #10277 @@ -25,6 +26,8 @@ * [BUGFIX] Mimirtool: `remote-read` commands will now return data. #10286 * [BUGFIX] PromQL: Fix deriv, predict_linear and double_exponential_smoothing with histograms https://github.com/prometheus/prometheus/pull/15686 #10383 * [BUGFIX] MQE: Fix deriv with histograms #10383 +* [BUGFIX] PromQL: Fix functions with histograms https://github.com/prometheus/prometheus/pull/15711 #10400 +* [BUGFIX] MQE: Fix functions with histograms #10400 ### Mixin diff --git a/go.mod b/go.mod index ff34ae71072..f2bd093067b 100644 --- a/go.mod +++ b/go.mod @@ -287,7 +287,7 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) -replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20250110020350-a1e2bcf4a615 // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index de3ca81db05..918a1ec5fb7 100644 --- a/go.sum +++ b/go.sum @@ -1283,8 +1283,8 @@ github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 h1:1TeKhyS+pvzO github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40/go.mod h1:IGRj8oOoxwJbHBYl1+OhS9UjQR0dv6SQOep7HqmtyFU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203 h1:gCU3GO2mZUzsLAa/JRRDJpKbYhkXy7caWnzfNqbgDig= -github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203/go.mod h1:KfyZCeyGxf5gvl6VZbrQsd400nJjGw+ygMEtDVZKIT4= +github.com/grafana/mimir-prometheus v0.0.0-20250110020350-a1e2bcf4a615 h1:lr3wUcXU0mScCDn/4NXc0CYglZJfy5l35sOJFar9qE0= +github.com/grafana/mimir-prometheus v0.0.0-20250110020350-a1e2bcf4a615/go.mod h1:KfyZCeyGxf5gvl6VZbrQsd400nJjGw+ygMEtDVZKIT4= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/prometheus-alertmanager v0.25.1-0.20240930132144-b5e64e81e8d3 h1:6D2gGAwyQBElSrp3E+9lSr7k8gLuP3Aiy20rweLWeBw= diff --git a/pkg/ruler/fixtures/rules_chain.yaml b/pkg/ruler/fixtures/rules_chain.yaml new file mode 100644 index 00000000000..00043b8d6f3 --- /dev/null +++ b/pkg/ruler/fixtures/rules_chain.yaml @@ -0,0 +1,22 @@ +groups: + - name: chain + rules: + # Evaluated concurrently, no dependencies + - record: job:http_requests:rate1m + expr: sum by (job)(rate(http_requests_total[1m])) + - record: job:http_requests:rate5m + expr: sum by (job)(rate(http_requests_total[1m])) + + # Evaluated sequentially, dependents and dependencies + - record: 
job1:http_requests:rate1m + expr: job:http_requests:rate1m{job="job1"} + - record: job1_cluster1:http_requests:rate1m + expr: job1:http_requests:rate1m{cluster="cluster1"} + + # Evaluated concurrently, no dependents + - record: job1_cluster2:http_requests:rate1m + expr: job1:http_requests:rate1m{cluster="cluster2"} + - record: job1_cluster1_namespace1:http_requests:rate1m + expr: job1_cluster1:http_requests:rate1m{namespace="namespace1"} + - record: job1_cluster1_namespace2:http_requests:rate1m + expr: job1_cluster1:http_requests:rate1m{namespace="namespace2"} diff --git a/pkg/ruler/fixtures/rules_indeterminates.yaml b/pkg/ruler/fixtures/rules_indeterminates.yaml new file mode 100644 index 00000000000..a906d3b504a --- /dev/null +++ b/pkg/ruler/fixtures/rules_indeterminates.yaml @@ -0,0 +1,18 @@ +groups: + - name: indeterminate + rules: + # This shouldn't run in parallel because of the open matcher + - record: job:http_requests:rate1m + expr: sum by (job)(rate(http_requests_total[1m])) + - record: job:http_requests:rate5m + expr: sum by (job)(rate(http_requests_total[5m])) + - record: job:http_requests:rate15m + expr: sum by (job)(rate(http_requests_total[15m])) + - record: job:http_requests:rate30m + expr: sum by (job)(rate(http_requests_total[30m])) + - record: job:http_requests:rate1h + expr: sum by (job)(rate(http_requests_total[1h])) + - record: job:http_requests:rate2h + expr: sum by (job)(rate(http_requests_total[2h])) + - record: matcher + expr: '{job="job1"}' diff --git a/pkg/ruler/fixtures/rules_multiple_independent.yaml b/pkg/ruler/fixtures/rules_multiple_independent.yaml new file mode 100644 index 00000000000..e071be3eff8 --- /dev/null +++ b/pkg/ruler/fixtures/rules_multiple_independent.yaml @@ -0,0 +1,15 @@ +groups: + - name: independents + rules: + - record: job:http_requests:rate1m + expr: sum by (job)(rate(http_requests_total[1m])) + - record: job:http_requests:rate5m + expr: sum by (job)(rate(http_requests_total[5m])) + - record: job:http_requests:rate15m + expr: sum by (job)(rate(http_requests_total[15m])) + - record: job:http_requests:rate30m + expr: sum by (job)(rate(http_requests_total[30m])) + - record: job:http_requests:rate1h + expr: sum by (job)(rate(http_requests_total[1h])) + - record: job:http_requests:rate2h + expr: sum by (job)(rate(http_requests_total[2h])) diff --git a/pkg/ruler/fixtures/rules_topological_sort_needed.json b/pkg/ruler/fixtures/rules_topological_sort_needed.json new file mode 100644 index 00000000000..6f63c38c94c --- /dev/null +++ b/pkg/ruler/fixtures/rules_topological_sort_needed.json @@ -0,0 +1,245 @@ +{ + "groups": [ + { + "name": "test-group", + "rules": [ + { + "record": "pf:nginx_http_requests:rate5m", + "expr": "sum by (lp_service, k8scluster) (rate(nginx_http_requests_total{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt)-web\"}[5m]))" + }, + { + "record": "pf:nginx_http_requests:rate5m:avg_over_time_1w", + "expr": "avg_over_time(pf:nginx_http_requests:rate5m[1w])" + }, + { + "record": "pf:nginx_http_requests:rate5m:stddev_over_time_1w", + "expr": "stddev_over_time(pf:nginx_http_requests:rate5m[1w])" + }, + { + "record": "pf:nginx_http_requests:rate5m_prediction", + "expr": "clamp_min(quantile without (offset) (0.5, label_replace(avg_over_time(pf:nginx_http_requests:rate5m[4h] offset 6d22h) + pf:nginx_http_requests:rate5m:avg_over_time_1w - pf:nginx_http_requests:rate5m:avg_over_time_1w offset 1w, \"offset\", \"1w\", \"\", \"\") or label_replace(avg_over_time(pf:nginx_http_requests:rate5m[4h] offset 13d22h) + 
pf:nginx_http_requests:rate5m:avg_over_time_1w - pf:nginx_http_requests:rate5m:avg_over_time_1w offset 2w, \"offset\", \"2w\", \"\", \"\") or label_replace(avg_over_time(pf:nginx_http_requests:rate5m[4h] offset 20d22h) + pf:nginx_http_requests:rate5m:avg_over_time_1w - pf:nginx_http_requests:rate5m:avg_over_time_1w offset 3w, \"offset\", \"3w\", \"\", \"\")), 0)" + }, + { + "record": "pf:nginx_response_time:avg_over_time_5m", + "expr": "sum by (lp_service, k8scluster) (rate(nginx_http_request_duration_seconds_sum{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt)-web\"}[5m])) / sum by (lp_service, k8scluster) (rate(nginx_http_request_duration_seconds_count{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt)-web\"}[5m]))" + }, + { + "record": "pf:nginx_response_time:avg_over_time_5m:avg_over_time_1w", + "expr": "avg_over_time(pf:nginx_response_time:avg_over_time_5m[1w])" + }, + { + "record": "pf:nginx_response_time:avg_over_time_5m:stddev_over_time_1w", + "expr": "stddev_over_time(pf:nginx_response_time:avg_over_time_5m[1w])" + }, + { + "record": "pf:nginx_response_time:avg_over_time_5m_prediction", + "expr": "clamp_min(quantile without (offset) (0.5, label_replace(avg_over_time(pf:nginx_response_time:avg_over_time_5m[4h] offset 6d22h) + pf:nginx_response_time:avg_over_time_5m:avg_over_time_1w - pf:nginx_response_time:avg_over_time_5m:avg_over_time_1w offset 1w, \"offset\", \"1w\", \"\", \"\") or label_replace(avg_over_time(pf:nginx_response_time:avg_over_time_5m[4h] offset 13d22h) + pf:nginx_response_time:avg_over_time_5m:avg_over_time_1w - pf:nginx_response_time:avg_over_time_5m:avg_over_time_1w offset 2w, \"offset\", \"2w\", \"\", \"\") or label_replace(avg_over_time(pf:nginx_response_time:avg_over_time_5m[4h] offset 20d22h) + pf:nginx_response_time:avg_over_time_5m:avg_over_time_1w - pf:nginx_response_time:avg_over_time_5m:avg_over_time_1w offset 3w, \"offset\", \"3w\", \"\", \"\")), 0)" + }, + { + "record": "pf:nginx_http_4xx_responses:rate5m", + "expr": "sum by (lp_service, k8scluster) (rate(nginx_http_requests_total{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt)-web\",status=~\"4.*\"}[5m]))" + }, + { + "record": "pf:nginx_http_4xx_responses:rate5m:avg_over_time_1w", + "expr": "avg_over_time(pf:nginx_http_4xx_responses:rate5m[1w])" + }, + { + "record": "pf:nginx_http_4xx_responses:rate5m:stddev_over_time_1w", + "expr": "stddev_over_time(pf:nginx_http_4xx_responses:rate5m[1w])" + }, + { + "record": "pf:nginx_http_4xx_responses:rate5m_prediction", + "expr": "clamp_min(quantile without (offset) (0.5, label_replace(avg_over_time(pf:nginx_http_4xx_responses:rate5m[4h] offset 6d22h) + pf:nginx_http_4xx_responses:rate5m:avg_over_time_1w - pf:nginx_http_4xx_responses:rate5m:avg_over_time_1w offset 1w, \"offset\", \"1w\", \"\", \"\") or label_replace(avg_over_time(pf:nginx_http_4xx_responses:rate5m[4h] offset 13d22h) + pf:nginx_http_4xx_responses:rate5m:avg_over_time_1w - pf:nginx_http_4xx_responses:rate5m:avg_over_time_1w offset 2w, \"offset\", \"2w\", \"\", \"\") or label_replace(avg_over_time(pf:nginx_http_4xx_responses:rate5m[4h] offset 20d22h) + pf:nginx_http_4xx_responses:rate5m:avg_over_time_1w - pf:nginx_http_4xx_responses:rate5m:avg_over_time_1w offset 3w, \"offset\", \"3w\", \"\", \"\")), 0)" + }, + { + "record": "pf:nginx_http_5xx_responses:rate5m", + "expr": "sum by (lp_service, k8scluster) 
(rate(nginx_http_requests_total{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt)-web\",status=~\"5.*\"}[5m]))" + }, + { + "record": "pf:nginx_http_5xx_responses:rate5m:avg_over_time_1w", + "expr": "avg_over_time(pf:nginx_http_5xx_responses:rate5m[1w])" + }, + { + "record": "pf:nginx_http_5xx_responses:rate5m:stddev_over_time_1w", + "expr": "stddev_over_time(pf:nginx_http_5xx_responses:rate5m[1w])" + }, + { + "record": "pf:nginx_http_5xx_responses:rate5m_prediction", + "expr": "clamp_min(quantile without (offset) (0.5, label_replace(avg_over_time(pf:nginx_http_5xx_responses:rate5m[4h] offset 6d22h) + pf:nginx_http_5xx_responses:rate5m:avg_over_time_1w - pf:nginx_http_5xx_responses:rate5m:avg_over_time_1w offset 1w, \"offset\", \"1w\", \"\", \"\") or label_replace(avg_over_time(pf:nginx_http_5xx_responses:rate5m[4h] offset 13d22h) + pf:nginx_http_5xx_responses:rate5m:avg_over_time_1w - pf:nginx_http_5xx_responses:rate5m:avg_over_time_1w offset 2w, \"offset\", \"2w\", \"\", \"\") or label_replace(avg_over_time(pf:nginx_http_5xx_responses:rate5m[4h] offset 20d22h) + pf:nginx_http_5xx_responses:rate5m:avg_over_time_1w - pf:nginx_http_5xx_responses:rate5m:avg_over_time_1w offset 3w, \"offset\", \"3w\", \"\", \"\")), 0)" + }, + { + "record": "pf:app_http_requests:rate5m", + "expr": "sum by (application, k8scluster) (rate(http_server_requests_seconds_count{application=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection|rollover|providersubscription|providersubscriptionv2)-app\",k8scluster=\"sy-kube01\"}[5m]))" + }, + { + "record": "pf:app_http_requests:rate5m:avg_over_time_1w", + "expr": "avg_over_time(pf:app_http_requests:rate5m[1w])" + }, + { + "record": "pf:app_http_requests:rate5m:stddev_over_time_1w", + "expr": "stddev_over_time(pf:app_http_requests:rate5m[1w])" + }, + { + "record": "pf:app_http_requests:rate5m_prediction", + "expr": "clamp_min(quantile without (offset) (0.5, label_replace(avg_over_time(pf:app_http_requests:rate5m[4h] offset 6d22h) + pf:app_http_requests:rate5m:avg_over_time_1w - pf:app_http_requests:rate5m:avg_over_time_1w offset 1w, \"offset\", \"1w\", \"\", \"\") or label_replace(avg_over_time(pf:app_http_requests:rate5m[4h] offset 13d22h) + pf:app_http_requests:rate5m:avg_over_time_1w - pf:app_http_requests:rate5m:avg_over_time_1w offset 2w, \"offset\", \"2w\", \"\", \"\") or label_replace(avg_over_time(pf:app_http_requests:rate5m[4h] offset 20d22h) + pf:app_http_requests:rate5m:avg_over_time_1w - pf:app_http_requests:rate5m:avg_over_time_1w offset 3w, \"offset\", \"3w\", \"\", \"\")), 0)" + }, + { + "record": "pf:app_response_time:avg_over_time_5m", + "expr": "sum by (application, k8scluster) (rate(http_server_requests_seconds_sum{application=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection|rollover|providersubscription|providersubscriptionv2)-app\",k8scluster=\"sy-kube01\"}[5m])) / sum by (application, k8scluster) (rate(http_server_requests_seconds_count{application=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection|rollover|providersubscription|providersubscriptionv2)-app\",k8scluster=\"sy-kube01\"}[5m]))" + }, + { + "record": "pf:app_response_time:avg_over_time_5m:avg_over_time_1w", + "expr": "avg_over_time(pf:app_response_time:avg_over_time_5m[1w])" + }, + { + "record": "pf:app_response_time:avg_over_time_5m:stddev_over_time_1w", + "expr": "stddev_over_time(pf:app_response_time:avg_over_time_5m[1w])" + }, + { + 
"record": "pf:app_response_time:avg_over_time_5m_prediction", + "expr": "clamp_min(quantile without (offset) (0.5, label_replace(avg_over_time(pf:app_response_time:avg_over_time_5m[4h] offset 6d22h) + pf:app_response_time:avg_over_time_5m:avg_over_time_1w - pf:app_response_time:avg_over_time_5m:avg_over_time_1w offset 1w, \"offset\", \"1w\", \"\", \"\") or label_replace(avg_over_time(pf:app_response_time:avg_over_time_5m[4h] offset 13d22h) + pf:app_response_time:avg_over_time_5m:avg_over_time_1w - pf:app_response_time:avg_over_time_5m:avg_over_time_1w offset 2w, \"offset\", \"2w\", \"\", \"\") or label_replace(avg_over_time(pf:app_response_time:avg_over_time_5m[4h] offset 20d22h) + pf:app_response_time:avg_over_time_5m:avg_over_time_1w - pf:app_response_time:avg_over_time_5m:avg_over_time_1w offset 3w, \"offset\", \"3w\", \"\", \"\")), 0)" + }, + { + "record": "pf:app_http_4xx_responses:rate5m", + "expr": "sum by (application, k8scluster) (rate(http_server_requests_seconds_count{application=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection|rollover|providersubscription|providersubscriptionv2)-app\",k8scluster=\"sy-kube01\",status=~\"4.*\"}[5m]))" + }, + { + "record": "pf:app_http_4xx_responses:rate5m:avg_over_time_1w", + "expr": "avg_over_time(pf:app_http_4xx_responses:rate5m[1w])" + }, + { + "record": "pf:app_http_4xx_responses:rate5m:stddev_over_time_1w", + "expr": "stddev_over_time(pf:app_http_4xx_responses:rate5m[1w])" + }, + { + "record": "pf:app_http_4xx_responses:rate5m_prediction", + "expr": "clamp_min(quantile without (offset) (0.5, label_replace(avg_over_time(pf:app_http_4xx_responses:rate5m[4h] offset 6d22h) + pf:app_http_4xx_responses:rate5m:avg_over_time_1w - pf:app_http_4xx_responses:rate5m:avg_over_time_1w offset 1w, \"offset\", \"1w\", \"\", \"\") or label_replace(avg_over_time(pf:app_http_4xx_responses:rate5m[4h] offset 13d22h) + pf:app_http_4xx_responses:rate5m:avg_over_time_1w - pf:app_http_4xx_responses:rate5m:avg_over_time_1w offset 2w, \"offset\", \"2w\", \"\", \"\") or label_replace(avg_over_time(pf:app_http_4xx_responses:rate5m[4h] offset 20d22h) + pf:app_http_4xx_responses:rate5m:avg_over_time_1w - pf:app_http_4xx_responses:rate5m:avg_over_time_1w offset 3w, \"offset\", \"3w\", \"\", \"\")), 0)" + }, + { + "record": "pf:app_http_5xx_responses:rate5m", + "expr": "sum by (application, k8scluster) (rate(http_server_requests_seconds_count{application=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection|rollover|providersubscription|providersubscriptionv2)-app\",k8scluster=\"sy-kube01\",status=~\"5.*\"}[5m]))" + }, + { + "record": "pf:app_http_5xx_responses:rate5m:avg_over_time_1w", + "expr": "avg_over_time(pf:app_http_5xx_responses:rate5m[1w])" + }, + { + "record": "pf:app_http_5xx_responses:rate5m:stddev_over_time_1w", + "expr": "stddev_over_time(pf:app_http_5xx_responses:rate5m[1w])" + }, + { + "record": "pf:app_http_5xx_responses:rate5m_prediction", + "expr": "clamp_min(quantile without (offset) (0.5, label_replace(avg_over_time(pf:app_http_5xx_responses:rate5m[4h] offset 6d22h) + pf:app_http_5xx_responses:rate5m:avg_over_time_1w - pf:app_http_5xx_responses:rate5m:avg_over_time_1w offset 1w, \"offset\", \"1w\", \"\", \"\") or label_replace(avg_over_time(pf:app_http_5xx_responses:rate5m[4h] offset 13d22h) + pf:app_http_5xx_responses:rate5m:avg_over_time_1w - pf:app_http_5xx_responses:rate5m:avg_over_time_1w offset 2w, \"offset\", \"2w\", \"\", \"\") or 
label_replace(avg_over_time(pf:app_http_5xx_responses:rate5m[4h] offset 20d22h) + pf:app_http_5xx_responses:rate5m:avg_over_time_1w - pf:app_http_5xx_responses:rate5m:avg_over_time_1w offset 3w, \"offset\", \"3w\", \"\", \"\")), 0)" + }, + { + "record": "pf:app_log_events:rate5m", + "expr": "sum by (lp_service, level, k8scluster) (rate(log4j2_events_total{k8scluster=\"sy-kube01\",level=~\"error|warn\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection|rollover|providersubscription|providersubscriptionv2)-app\"}[5m]))" + }, + { + "record": "pf:app_log_events:rate5m:avg_over_time_1w", + "expr": "avg_over_time(pf:app_log_events:rate5m[1w])" + }, + { + "record": "pf:app_log_events:rate5m:stddev_over_time_1w", + "expr": "stddev_over_time(pf:app_log_events:rate5m[1w])" + }, + { + "record": "pf:app_log_events:rate5m_prediction", + "expr": "clamp_min(quantile without (offset) (0.5, label_replace(avg_over_time(pf:app_log_events:rate5m[4h] offset 6d22h) + pf:app_log_events:rate5m:avg_over_time_1w - pf:app_log_events:rate5m:avg_over_time_1w offset 1w, \"offset\", \"1w\", \"\", \"\") or label_replace(avg_over_time(pf:app_log_events:rate5m[4h] offset 13d22h) + pf:app_log_events:rate5m:avg_over_time_1w - pf:app_log_events:rate5m:avg_over_time_1w offset 2w, \"offset\", \"2w\", \"\", \"\") or label_replace(avg_over_time(pf:app_log_events:rate5m[4h] offset 20d22h) + pf:app_log_events:rate5m:avg_over_time_1w - pf:app_log_events:rate5m:avg_over_time_1w offset 3w, \"offset\", \"3w\", \"\", \"\")), 0)" + }, + { + "record": "pf_pods_restart_too_much", + "expr": "rate(kube_pod_container_status_restarts_total{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection)-(web|app)\"}[5m]) > 0" + }, + { + "record": "pf_pods_are_unhealthy", + "expr": "health{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection)-(web|app)\"} > 0" + }, + { + "record": "pf_pod_dependencies_are_unhealthy", + "expr": "health_dependency{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection)-(web|app)\"} > 0" + }, + { + "record": "pf_nginx_request_rate_is_too_low", + "expr": "pf:nginx_http_requests:rate5m == 0" + }, + { + "record": "pf_app_request_rate_is_too_low", + "expr": "pf:app_http_requests:rate5m{application!~\"(lp-encryptionmgmt-app|lp-rtbf-app)\"} == 0" + }, + { + "record": "pf_nginx_request_rate_is_too_high", + "expr": "pf:nginx_http_requests:rate5m > 10000" + }, + { + "record": "pf_app_request_rate_is_too_high", + "expr": "pf:app_http_requests:rate5m > 10000" + }, + { + "record": "pf_nginx_request_rate_is_outside_normal_range", + "expr": "abs((pf:nginx_http_requests:rate5m - pf:nginx_http_requests:rate5m_prediction) / pf:nginx_http_requests:rate5m:stddev_over_time_1w) > 2" + }, + { + "record": "pf_app_request_rate_is_outside_normal_range", + "expr": "abs((pf:app_http_requests:rate5m - pf:app_http_requests:rate5m_prediction) / pf:app_http_requests:rate5m:stddev_over_time_1w) > 2" + }, + { + "record": "pf_nginx_response_time_is_too_high", + "expr": "pf:nginx_response_time:avg_over_time_5m > 0.5" + }, + { + "record": "pf_app_response_time_is_too_high", + "expr": "pf:app_response_time:avg_over_time_5m > 0.5" + }, + { + "record": "pf_nginx_response_time_is_outside_normal_range", + "expr": "abs((pf:nginx_response_time:avg_over_time_5m - 
pf:nginx_response_time:avg_over_time_5m_prediction) / pf:nginx_response_time:avg_over_time_5m:stddev_over_time_1w) > 2" + }, + { + "record": "pf_app_response_time_is_outside_normal_range", + "expr": "abs((pf:app_response_time:avg_over_time_5m - pf:app_response_time:avg_over_time_5m_prediction) / pf:app_response_time:avg_over_time_5m:stddev_over_time_1w) > 2" + }, + { + "record": "pf_nginx_4xx_rate_is_outside_normal_range", + "expr": "abs((pf:nginx_http_4xx_responses:rate5m - pf:nginx_http_4xx_responses:rate5m_prediction) / pf:nginx_http_4xx_responses:rate5m:stddev_over_time_1w) > 2" + }, + { + "record": "pf_app_4xx_rate_is_outside_normal_range", + "expr": "abs((pf:app_http_4xx_responses:rate5m - pf:app_http_4xx_responses:rate5m_prediction) / pf:app_http_4xx_responses:rate5m:stddev_over_time_1w) > 2" + }, + { + "record": "pf_nginx_4xx_ratio_exceeds_20", + "expr": "pf:nginx_http_4xx_responses:rate5m / pf:nginx_http_requests:rate5m > 20" + }, + { + "record": "pf_app_4xx_ratio_exceeds_20", + "expr": "pf:app_http_4xx_responses:rate5m / pf:app_http_requests:rate5m > 20" + }, + { + "record": "pf_nginx_5xx_rate_is_outside_normal_range", + "expr": "abs((pf:nginx_http_5xx_responses:rate5m - pf:nginx_http_5xx_responses:rate5m_prediction) / pf:nginx_http_5xx_responses:rate5m:stddev_over_time_1w) > 2" + }, + { + "record": "pf_app_5xx_rate_is_outside_normal_range", + "expr": "abs((pf:app_http_5xx_responses:rate5m - pf:app_http_5xx_responses:rate5m_prediction) / pf:app_http_5xx_responses:rate5m:stddev_over_time_1w) > 2" + }, + { + "record": "pf_nginx_5xx_ratio_exceeds_20", + "expr": "pf:nginx_http_5xx_responses:rate5m / pf:nginx_http_requests:rate5m > 20" + }, + { + "record": "pf_app_5xx_ratio_exceeds_20", + "expr": "pf:app_http_5xx_responses:rate5m / pf:app_http_requests:rate5m > 20" + }, + { + "record": "pf_log_rate_is_outside_normal_range", + "expr": "abs((pf:app_log_events:rate5m - pf:app_log_events:rate5m_prediction) / pf:app_log_events:rate5m:stddev_over_time_1w) > 2" + }, + { + "record": "pf_app_heap_usage_too_high", + "expr": "100 * (avg by (k8scluster, lp_service, kubernetes_pod_name) (container_memory_working_set_bytes{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection|rollover|providersubscription|providersubscriptionv2)-app\"}) / avg by (k8scluster, lp_service, kubernetes_pod_name) (container_spec_memory_limit_bytes{k8scluster=\"sy-kube01\",lp_service=~\"lp-(csds|mtls|rtbf|encryptionmgmt|acdefaults|acprovision|acsitesetting|acdomainprotection|rollover|providersubscription|providersubscriptionv2)-app\"})) > 90" + } + ] + } + ] +} diff --git a/pkg/ruler/rule_concurrency.go b/pkg/ruler/rule_concurrency.go index ba4a506c2a5..3b26f20e953 100644 --- a/pkg/ruler/rule_concurrency.go +++ b/pkg/ruler/rule_concurrency.go @@ -7,6 +7,7 @@ import ( "sync" "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/rules" @@ -115,6 +116,7 @@ func NewMultiTenantConcurrencyController(logger log.Logger, maxGlobalConcurrency // NewTenantConcurrencyControllerFor returns a new rules.RuleConcurrencyController to use for the input tenantID. 
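// Controllers handed out for different tenants all share the same global
// semaphore, so an evaluation slot is granted only when both the per-tenant
// and the global concurrency limits have headroom.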
func (c *MultiTenantConcurrencyController) NewTenantConcurrencyControllerFor(tenantID string) rules.RuleConcurrencyController { return &TenantConcurrencyController{ + logger: log.With(c.logger, "tenant", tenantID), slotsInUse: c.metrics.SlotsInUse.WithLabelValues(tenantID), attemptsStartedTotal: c.metrics.AttemptsStartedTotal.WithLabelValues(tenantID), attemptsIncompleteTotal: c.metrics.AttemptsIncompleteTotal.WithLabelValues(tenantID), @@ -132,6 +134,7 @@ func (c *MultiTenantConcurrencyController) NewTenantConcurrencyControllerFor(ten // TenantConcurrencyController is a concurrency controller that limits the number of concurrent rule evaluations per tenant. // It also takes into account the global concurrency limit. type TenantConcurrencyController struct { + logger log.Logger tenantID string thresholdRuleConcurrency float64 // Percentage of the rule interval at which we consider the rule group at risk of missing its evaluation. @@ -155,19 +158,7 @@ func (c *TenantConcurrencyController) Done(_ context.Context) { } // Allow tries to acquire a slot from the concurrency controller. -func (c *TenantConcurrencyController) Allow(_ context.Context, group *rules.Group, rule rules.Rule) bool { - // To allow a rule to be executed concurrently, we need 3 conditions: - // 1. The rule group must be at risk of missing its evaluation. - // 2. The rule must not have any rules that depend on it. - // 3. The rule itself must not depend on any other rules. - if !c.isGroupAtRisk(group) { - return false - } - - if !isRuleIndependent(rule) { - return false - } - +func (c *TenantConcurrencyController) Allow(_ context.Context, _ *rules.Group, _ rules.Rule) bool { // Next, try to acquire a global concurrency slot. c.attemptsStartedTotal.Inc() if !c.globalConcurrency.TryAcquire(1) { @@ -187,6 +178,84 @@ func (c *TenantConcurrencyController) Allow(_ context.Context, group *rules.Grou return false } +// SplitGroupIntoBatches splits the group into batches of rules that can be evaluated concurrently. +// It tries to batch rules that have no dependencies together and rules that have dependencies in separate batches. +// Returning no batches or nil means that the group should be evaluated sequentially. +func (c *TenantConcurrencyController) SplitGroupIntoBatches(_ context.Context, g *rules.Group) []rules.ConcurrentRules { + if !c.isGroupAtRisk(g) { + // If the group is not at risk, we can evaluate the rules sequentially. + return nil + } + + logger := log.With(c.logger, "group", g.Name()) + + type ruleInfo struct { + ruleIdx int + unevaluatedDependencies map[rules.Rule]struct{} + } + remainingRules := make(map[rules.Rule]ruleInfo) + + // This batch holds the rules that have no dependencies and will be run first. + firstBatch := rules.ConcurrentRules{} + for i, r := range g.Rules() { + if r.NoDependencyRules() { + firstBatch = append(firstBatch, i) + continue + } + // Initialize the rule info with the rule's dependencies. + // Use a copy of the dependencies to avoid mutating the rule. + info := ruleInfo{ruleIdx: i, unevaluatedDependencies: map[rules.Rule]struct{}{}} + for _, dep := range r.DependencyRules() { + info.unevaluatedDependencies[dep] = struct{}{} + } + remainingRules[r] = info + } + if len(firstBatch) == 0 { + // There are no rules without dependencies. + // Fall back to sequential evaluation. 
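+		// This can happen when every rule in the group depends on at least one other
+		// rule, for example when an open-ended selector such as {job="job1"} forces
+		// the dependency analysis to treat all of the group's rules as inter-dependent.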
+ level.Info(logger).Log("msg", "No rules without dependencies found, falling back to sequential rule evaluation.") + return nil + } + result := []rules.ConcurrentRules{firstBatch} + + // Build the order of rules to evaluate based on dependencies. + for len(remainingRules) > 0 { + previousBatch := result[len(result)-1] + // Remove the batch's rules from the dependencies of its dependents. + for _, idx := range previousBatch { + rule := g.Rules()[idx] + for _, dependent := range rule.DependentRules() { + dependentInfo := remainingRules[dependent] + delete(dependentInfo.unevaluatedDependencies, rule) + } + } + + var batch rules.ConcurrentRules + // Find rules that have no remaining dependencies. + for name, info := range remainingRules { + if len(info.unevaluatedDependencies) == 0 { + batch = append(batch, info.ruleIdx) + delete(remainingRules, name) + } + } + + if len(batch) == 0 { + // There is a cycle in the rules' dependencies. + // We can't evaluate them concurrently. + // Fall back to sequential evaluation. + level.Warn(logger).Log("msg", "Cyclic rule dependencies detected, falling back to sequential rule evaluation") + return nil + } + + result = append(result, batch) + } + + level.Info(logger).Log("msg", "Batched rules into concurrent blocks", "rules", len(g.Rules()), "batches", len(result)) + level.Debug(logger).Log("msg", "Batched rules into concurrent blocks", "batches", result) + + return result +} + // isGroupAtRisk checks if the rule group's last evaluation time is within the risk threshold. func (c *TenantConcurrencyController) isGroupAtRisk(group *rules.Group) bool { interval := group.Interval().Seconds() @@ -205,11 +274,6 @@ func (c *TenantConcurrencyController) isGroupAtRisk(group *rules.Group) bool { return false } -// isRuleIndependent checks if the rule is independent of other rules. -func isRuleIndependent(rule rules.Rule) bool { - return rule.NoDependentRules() && rule.NoDependencyRules() -} - // NoopMultiTenantConcurrencyController is a concurrency controller that does not allow for concurrency. 
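// Its per-tenant controllers reject every Allow call and return nil from
// SplitGroupIntoBatches, so rule groups are always evaluated sequentially.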
type NoopMultiTenantConcurrencyController struct{} @@ -221,6 +285,10 @@ func (n *NoopMultiTenantConcurrencyController) NewTenantConcurrencyControllerFor type NoopTenantConcurrencyController struct{} func (n *NoopTenantConcurrencyController) Done(_ context.Context) {} +func (n *NoopTenantConcurrencyController) SplitGroupIntoBatches(_ context.Context, _ *rules.Group) []rules.ConcurrentRules { + return nil +} + func (n *NoopTenantConcurrencyController) Allow(_ context.Context, _ *rules.Group, _ rules.Rule) bool { return false } diff --git a/pkg/ruler/rule_concurrency_test.go b/pkg/ruler/rule_concurrency_test.go index 64be33715cc..e7c485db30a 100644 --- a/pkg/ruler/rule_concurrency_test.go +++ b/pkg/ruler/rule_concurrency_test.go @@ -6,6 +6,7 @@ import ( "bytes" "context" "fmt" + "os" "testing" "time" @@ -20,6 +21,7 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/atomic" "golang.org/x/sync/semaphore" + "gopkg.in/yaml.v3" "github.com/grafana/mimir/pkg/util/validation" ) @@ -106,8 +108,8 @@ func TestMultiTenantConcurrencyController(t *testing.T) { exp, err := parser.ParseExpr("vector(1)") require.NoError(t, err) rule1 := rules.NewRecordingRule("test", exp, labels.Labels{}) - rule1.SetNoDependencyRules(true) - rule1.SetNoDependentRules(true) + rule1.SetDependencyRules([]rules.Rule{}) + rule1.SetDependentRules([]rules.Rule{}) globalController := NewMultiTenantConcurrencyController(logger, 3, 50.0, reg, limits) user1Controller := globalController.NewTenantConcurrencyControllerFor("user1") @@ -171,40 +173,6 @@ cortex_ruler_independent_rule_evaluation_concurrency_attempts_completed_total{us user2Controller.Done(ctx) user2Controller.Done(ctx) - // Finally, let's try a few edge cases. - rg2 := rules.NewGroup(rules.GroupOptions{ - File: "test.rules", - Name: "test", - Interval: 1 * time.Minute, // group not at risk. - Opts: &rules.ManagerOptions{}, - }) - require.False(t, user1Controller.Allow(ctx, rg2, rule1)) // Should not be allowed with a group that is not at risk. - rule1.SetNoDependencyRules(false) - require.False(t, user1Controller.Allow(ctx, rg, rule1)) // Should not be allowed as the rule is no longer independent. - - // Check the metrics one final time to ensure there are no active slots in use. 
- require.NoError(t, testutil.GatherAndCompare(reg, bytes.NewBufferString(` -# HELP cortex_ruler_independent_rule_evaluation_concurrency_attempts_incomplete_total Total number of incomplete attempts to acquire concurrency slots across all tenants -# TYPE cortex_ruler_independent_rule_evaluation_concurrency_attempts_incomplete_total counter -cortex_ruler_independent_rule_evaluation_concurrency_attempts_incomplete_total{user="user1"} 2 -cortex_ruler_independent_rule_evaluation_concurrency_attempts_incomplete_total{user="user2"} 1 -# HELP cortex_ruler_independent_rule_evaluation_concurrency_attempts_started_total Total number of started attempts to acquire concurrency slots across all tenants -# TYPE cortex_ruler_independent_rule_evaluation_concurrency_attempts_started_total counter -cortex_ruler_independent_rule_evaluation_concurrency_attempts_started_total{user="user1"} 4 -cortex_ruler_independent_rule_evaluation_concurrency_attempts_started_total{user="user2"} 3 -# HELP cortex_ruler_independent_rule_evaluation_concurrency_slots_in_use Current number of concurrency slots currently in use across all tenants -# TYPE cortex_ruler_independent_rule_evaluation_concurrency_slots_in_use gauge -cortex_ruler_independent_rule_evaluation_concurrency_slots_in_use{user="user1"} 0 -cortex_ruler_independent_rule_evaluation_concurrency_slots_in_use{user="user2"} 0 -# HELP cortex_ruler_independent_rule_evaluation_concurrency_attempts_completed_total Total number of concurrency slots we're done using across all tenants -# TYPE cortex_ruler_independent_rule_evaluation_concurrency_attempts_completed_total counter -cortex_ruler_independent_rule_evaluation_concurrency_attempts_completed_total{user="user1"} 2 -cortex_ruler_independent_rule_evaluation_concurrency_attempts_completed_total{user="user2"} 2 -`))) - - // Make the rule independent again. - rule1.SetNoDependencyRules(true) - // Now let's test having a controller two times for the same tenant. 
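	// Both instances should share the same per-tenant slots rather than each
	// receiving an independent quota.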
user3Controller := globalController.NewTenantConcurrencyControllerFor("user3") user3ControllerTwo := globalController.NewTenantConcurrencyControllerFor("user3") @@ -215,91 +183,143 @@ cortex_ruler_independent_rule_evaluation_concurrency_attempts_completed_total{us require.True(t, user3ControllerTwo.Allow(ctx, rg, rule1)) } -func TestIsRuleIndependent(t *testing.T) { +func TestSplitGroupIntoBatches(t *testing.T) { + limits := validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) { + tenantLimits["user1"] = validation.MockDefaultLimits() + tenantLimits["user1"].RulerMaxIndependentRuleEvaluationConcurrencyPerTenant = 2 + }) + + mtController := NewMultiTenantConcurrencyController(log.NewNopLogger(), 3, 50.0, prometheus.NewPedanticRegistry(), limits) + controller := mtController.NewTenantConcurrencyControllerFor("user1") + + ruleManager := rules.NewManager(&rules.ManagerOptions{ + RuleConcurrencyController: controller, + }) + tests := map[string]struct { - rule rules.Rule - expected bool + inputFile string + expectedGroups []rules.ConcurrentRules }{ - "rule has neither dependencies nor dependents": { - rule: func() rules.Rule { - r := rules.NewRecordingRule("test", nil, labels.Labels{}) - r.SetNoDependentRules(true) - r.SetNoDependencyRules(true) - return r - }(), - expected: true, + "chained": { + inputFile: "fixtures/rules_chain.yaml", + expectedGroups: []rules.ConcurrentRules{ + {0, 1}, + {2}, + {3, 4}, + {5, 6}, + }, }, - "rule has both dependencies and dependents": { - rule: func() rules.Rule { - r := rules.NewRecordingRule("test", nil, labels.Labels{}) - r.SetNoDependentRules(false) - r.SetNoDependencyRules(false) - return r - }(), - expected: false, + "indeterminates": { + inputFile: "fixtures/rules_indeterminates.yaml", + expectedGroups: nil, }, - "rule has dependents": { - rule: func() rules.Rule { - r := rules.NewRecordingRule("test", nil, labels.Labels{}) - r.SetNoDependentRules(false) - r.SetNoDependencyRules(true) - return r - }(), - expected: false, + "all independent": { + inputFile: "fixtures/rules_multiple_independent.yaml", + expectedGroups: []rules.ConcurrentRules{ + {0, 1, 2, 3, 4, 5}, + }, }, - "rule has dependencies": { - rule: func() rules.Rule { - r := rules.NewRecordingRule("test", nil, labels.Labels{}) - r.SetNoDependentRules(true) - r.SetNoDependencyRules(false) - return r - }(), - expected: false, + "topological sort": { + inputFile: "fixtures/rules_topological_sort_needed.json", + expectedGroups: []rules.ConcurrentRules{ + {0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 37, 38, 58}, + {1, 2, 5, 6, 9, 10, 13, 14, 17, 18, 21, 22, 25, 26, 29, 30, 33, 34, 39, 40, 41, 42, 45, 46, 51, 52, 55, 56}, + {3, 7, 11, 15, 19, 23, 27, 31, 35}, + {43, 44, 47, 48, 49, 50, 53, 54, 57}, + }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - result := isRuleIndependent(tc.rule) - require.Equal(t, tc.expected, result) + // Load group with a -1 interval so it's always at risk. + groups, errs := ruleManager.LoadGroups(-1*time.Second, labels.EmptyLabels(), "", nil, []string{tc.inputFile}...) + require.Empty(t, errs) + require.Len(t, groups, 1) + + var group *rules.Group + for _, g := range groups { + group = g + } + + batches := controller.SplitGroupIntoBatches(context.Background(), group) + requireConcurrentRulesEqual(t, tc.expectedGroups, batches) + + // Make sure the group is not mutated and will still return the same batches when called again. 
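+			// SplitGroupIntoBatches works on a copy of each rule's dependency set, so a
+			// second call over the same group must see the full graph and produce the
+			// same batches.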
+ batches = controller.SplitGroupIntoBatches(context.Background(), group) + requireConcurrentRulesEqual(t, tc.expectedGroups, batches) }) } } +func requireConcurrentRulesEqual(t *testing.T, expected, actual []rules.ConcurrentRules) { + t.Helper() + + if expected == nil { + require.Nil(t, actual) + return + } + + // Like require.Equals but ignores the order of elements in the slices. + require.Len(t, actual, len(expected)) + for i, expectedBatch := range expected { + actualBatch := actual[i] + require.ElementsMatch(t, expectedBatch, actualBatch) + } +} + func TestGroupAtRisk(t *testing.T) { + // Write group file with 100 independent rules. + ruleCt := 100 + dummyRules := []map[string]interface{}{} + for i := 0; i < ruleCt; i++ { + dummyRules = append(dummyRules, map[string]interface{}{ + "record": fmt.Sprintf("test_rule%d", i), + "expr": "vector(1)", + }) + } + + groupFileContent := map[string]interface{}{ + "groups": []map[string]interface{}{ + { + "name": "test", + "rules": dummyRules, + }, + }, + } + + groupFile := t.TempDir() + "/test.rules" + f, err := os.Create(groupFile) + require.NoError(t, err) + encoder := yaml.NewEncoder(f) + require.NoError(t, encoder.Encode(groupFileContent)) + require.NoError(t, f.Close()) + createAndEvalTestGroup := func(interval time.Duration, evalConcurrently bool) *rules.Group { st := teststorage.New(t) defer st.Close() - // Create 100 rules that all take 1ms to evaluate. - var createdRules []rules.Rule - ruleCt := 100 ruleWaitTime := 1 * time.Millisecond - for i := 0; i < ruleCt; i++ { - q, err := parser.ParseExpr("vector(1)") - require.NoError(t, err) - rule := rules.NewRecordingRule(fmt.Sprintf("test_rule%d", i), q, labels.Labels{}) - rule.SetNoDependencyRules(true) - rule.SetNoDependentRules(true) - createdRules = append(createdRules, rule) - } - - // Create the group and evaluate it - opts := rules.GroupOptions{ - Interval: interval, - Opts: &rules.ManagerOptions{ - Appendable: st, - QueryFunc: func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) { - time.Sleep(ruleWaitTime) - return promql.Vector{}, nil - }, + opts := &rules.ManagerOptions{ + Appendable: st, + // Make the rules take 1ms to evaluate. + QueryFunc: func(_ context.Context, _ string, _ time.Time) (promql.Vector, error) { + time.Sleep(ruleWaitTime) + return promql.Vector{}, nil }, - Rules: createdRules, } if evalConcurrently { - opts.Opts.RuleConcurrencyController = &allowAllConcurrencyController{} + opts.RuleConcurrencyController = &allowAllConcurrencyController{} } - g := rules.NewGroup(opts) + manager := rules.NewManager(opts) + groups, errs := manager.LoadGroups(interval, labels.EmptyLabels(), "", nil, groupFile) + require.Empty(t, errs) + + var g *rules.Group + for _, group := range groups { + g = group + } + rules.DefaultEvalIterationFunc(context.Background(), g, time.Now()) // Sanity check that we're actually running the rules concurrently. 
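The controller under test here implements a layered topological sort. For reference, a minimal, self-contained sketch of the same idea over a plain string graph (the splitIntoBatches helper and its node names are hypothetical, not part of this patch):

    package main

    import "fmt"

    // splitIntoBatches layers a dependency graph: every node appears in a batch
    // after all of its dependencies. It returns nil when a cycle prevents any
    // safe ordering, mirroring the sequential fallback in SplitGroupIntoBatches.
    func splitIntoBatches(deps map[string][]string) [][]string {
        remaining := make(map[string]map[string]struct{}, len(deps))
        for node, ds := range deps {
            set := make(map[string]struct{}, len(ds))
            for _, d := range ds {
                set[d] = struct{}{}
            }
            remaining[node] = set
        }
        var result [][]string
        for len(remaining) > 0 {
            var batch []string
            for node, ds := range remaining {
                if len(ds) == 0 {
                    batch = append(batch, node)
                }
            }
            if len(batch) == 0 {
                return nil // a dependency cycle: no node is safe to evaluate next
            }
            for _, node := range batch {
                delete(remaining, node)
            }
            for _, ds := range remaining {
                for _, node := range batch {
                    delete(ds, node)
                }
            }
            result = append(result, batch)
        }
        return result
    }

    func main() {
        // A and B are independent, C depends on A, D depends on C: batches come
        // out as [A B], [C], [D] (order within a batch is unspecified).
        fmt.Println(splitIntoBatches(map[string][]string{
            "A": nil, "B": nil, "C": {"A"}, "D": {"C"},
        }))
    }

Each pass peels off the nodes whose dependencies have already been evaluated; an empty pass means a cycle, which corresponds to the sequential fallback above.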
@@ -371,4 +391,12 @@ func (a *allowAllConcurrencyController) Allow(_ context.Context, _ *rules.Group, return true } +func (a *allowAllConcurrencyController) SplitGroupIntoBatches(_ context.Context, g *rules.Group) []rules.ConcurrentRules { + batch := rules.ConcurrentRules{} + for i := range g.Rules() { + batch = append(batch, i) + } + return []rules.ConcurrentRules{batch} +} + func (a *allowAllConcurrencyController) Done(_ context.Context) {} diff --git a/pkg/ruler/rule_query_consistency_test.go b/pkg/ruler/rule_query_consistency_test.go index 90091f6bf3f..834a8ff3452 100644 --- a/pkg/ruler/rule_query_consistency_test.go +++ b/pkg/ruler/rule_query_consistency_test.go @@ -50,7 +50,7 @@ func TestWrapQueryFuncWithReadConsistency(t *testing.T) { t.Run("should inject strong read consistency if the rule has dependencies", func(t *testing.T) { r := rules.NewRecordingRule("", &parser.StringLiteral{}, labels.New()) - r.SetNoDependencyRules(false) + r.SetDependencyRules([]rules.Rule{rules.NewRecordingRule("other", &parser.StringLiteral{}, labels.New())}) ctx := rules.NewOriginContext(context.Background(), rules.NewRuleDetail(r)) hasReadConsistency, readConsistencyLevel := runWrappedFunc(ctx) @@ -60,7 +60,7 @@ func TestWrapQueryFuncWithReadConsistency(t *testing.T) { t.Run("should not inject read consistency level if the rule has no dependencies, to let run with the per-tenant default", func(t *testing.T) { r := rules.NewRecordingRule("", &parser.StringLiteral{}, labels.New()) - r.SetNoDependencyRules(true) + r.SetDependencyRules([]rules.Rule{}) ctx := rules.NewOriginContext(context.Background(), rules.NewRuleDetail(r)) hasReadConsistency, _ := runWrappedFunc(ctx) diff --git a/pkg/streamingpromql/operators/functions/range_vectors.go b/pkg/streamingpromql/operators/functions/range_vectors.go index f1270146064..1b7a533267d 100644 --- a/pkg/streamingpromql/operators/functions/range_vectors.go +++ b/pkg/streamingpromql/operators/functions/range_vectors.go @@ -67,17 +67,22 @@ func presentOverTime(step *types.RangeVectorStepData, _ float64, _ types.EmitAnn } var MaxOverTime = FunctionOverRangeVectorDefinition{ - SeriesMetadataFunction: DropSeriesName, - StepFunc: maxOverTime, + SeriesMetadataFunction: DropSeriesName, + StepFunc: maxOverTime, + NeedsSeriesNamesForAnnotations: true, } -func maxOverTime(step *types.RangeVectorStepData, _ float64, _ types.EmitAnnotationFunc) (float64, bool, *histogram.FloatHistogram, error) { +func maxOverTime(step *types.RangeVectorStepData, _ float64, emitAnnotation types.EmitAnnotationFunc) (float64, bool, *histogram.FloatHistogram, error) { head, tail := step.Floats.UnsafePoints() if len(head) == 0 && len(tail) == 0 { return 0, false, nil, nil } + if step.Histograms.Any() { + emitAnnotation(annotations.NewHistogramIgnoredInMixedRangeInfo) + } + maxSoFar := head[0].F head = head[1:] @@ -97,17 +102,22 @@ func maxOverTime(step *types.RangeVectorStepData, _ float64, _ types.EmitAnnotat } var MinOverTime = FunctionOverRangeVectorDefinition{ - SeriesMetadataFunction: DropSeriesName, - StepFunc: minOverTime, + SeriesMetadataFunction: DropSeriesName, + StepFunc: minOverTime, + NeedsSeriesNamesForAnnotations: true, } -func minOverTime(step *types.RangeVectorStepData, _ float64, _ types.EmitAnnotationFunc) (float64, bool, *histogram.FloatHistogram, error) { +func minOverTime(step *types.RangeVectorStepData, _ float64, emitAnnotation types.EmitAnnotationFunc) (float64, bool, *histogram.FloatHistogram, error) { head, tail := step.Floats.UnsafePoints() if len(head) == 0 && len(tail) == 
0 { return 0, false, nil, nil } + if step.Histograms.Any() { + emitAnnotation(annotations.NewHistogramIgnoredInMixedRangeInfo) + } + minSoFar := head[0].F head = head[1:] @@ -475,11 +485,8 @@ var Deriv = FunctionOverRangeVectorDefinition{ func deriv(step *types.RangeVectorStepData, _ float64, emitAnnotation types.EmitAnnotationFunc) (float64, bool, *histogram.FloatHistogram, error) { fHead, fTail := step.Floats.UnsafePoints() - hHead, hTail := step.Histograms.UnsafePoints() - - haveHistograms := len(hHead) > 0 || len(hTail) > 0 - if len(fHead)+len(fTail) == 1 && haveHistograms { + if len(fHead)+len(fTail) == 1 && step.Histograms.Any() { emitAnnotation(annotations.NewHistogramIgnoredInMixedRangeInfo) return 0, false, nil, nil } @@ -490,7 +497,7 @@ func deriv(step *types.RangeVectorStepData, _ float64, emitAnnotation types.Emit slope, _ := linearRegression(fHead, fTail, fHead[0].T) - if haveHistograms { + if step.Histograms.Any() { emitAnnotation(annotations.NewHistogramIgnoredInMixedRangeInfo) } diff --git a/pkg/streamingpromql/testdata/ours/functions.test b/pkg/streamingpromql/testdata/ours/functions.test index 6a8e7964415..fb0ebaef5ac 100644 --- a/pkg/streamingpromql/testdata/ours/functions.test +++ b/pkg/streamingpromql/testdata/ours/functions.test @@ -118,7 +118,7 @@ eval range from 0 to 7m step 1m present_over_time(some_metric_count[3m1s]) eval range from 0 to 7m step 1m present_over_time(some_metric_count[6s]) {foo="bar"} 1 1 1 1 _ _ 1 1 -eval range from 0 to 7m step 1m min_over_time(some_metric_count[3m1s]) +eval_info range from 0 to 7m step 1m min_over_time(some_metric_count[3m1s]) {foo="bar"} 0 0 0 0 1 2 3 _ eval range from 0 to 7m step 1m min_over_time(some_metric_count[6s]) @@ -127,7 +127,7 @@ eval range from 0 to 7m step 1m min_over_time(some_metric_count[6s]) eval range from 0 to 16m step 1m min_over_time(some_inf_and_nan_metric[3m1s]) {foo="baz"} 0 0 0 0 1 2 3 Inf Inf Inf NaN 8 7 6 6 6 6 -eval range from 0 to 7m step 1m max_over_time(some_metric_count[3m1s]) +eval_info range from 0 to 7m step 1m max_over_time(some_metric_count[3m1s]) {foo="bar"} 0 1 2 3 3 3 3 _ eval range from 0 to 7m step 1m max_over_time(some_metric_count[6s]) diff --git a/pkg/streamingpromql/testdata/upstream/functions.test b/pkg/streamingpromql/testdata/upstream/functions.test index 4e716c1185b..b6ae2b1d39b 100644 --- a/pkg/streamingpromql/testdata/upstream/functions.test +++ b/pkg/streamingpromql/testdata/upstream/functions.test @@ -975,41 +975,70 @@ eval instant at 1m avg_over_time(metric[2m]) # Tests for stddev_over_time and stdvar_over_time. clear load 10s - metric 0 8 8 2 3 + metric 0 8 8 2 3 + metric_histogram{type="only_histogram"} {{schema:1 sum:2 count:3}}x5 + metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}} # Unsupported by streaming engine. # eval instant at 1m stdvar_over_time(metric[2m]) -# {} 10.56 +# {} 10.56 # Unsupported by streaming engine. # eval instant at 1m stddev_over_time(metric[2m]) -# {} 3.249615 +# {} 3.249615 # Unsupported by streaming engine. # eval instant at 1m stddev_over_time((metric[2m])) -# {} 3.249615 +# {} 3.249615 + +# Tests for stddev_over_time and stdvar_over_time with histograms. +# Unsupported by streaming engine. +# eval instant at 1m stddev_over_time(metric_histogram{type="only_histogram"}[2m]) +# #empty + +# Unsupported by streaming engine. +# eval_info instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m]) +# {type="mix"} 0 + +# Unsupported by streaming engine. 
+# eval instant at 1m stdvar_over_time(metric_histogram{type="only_histogram"}[2m]) +# #empty + +# Unsupported by streaming engine. +# eval_info instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m]) +# {type="mix"} 0 # Tests for stddev_over_time and stdvar_over_time #4927. clear load 10s - metric 1.5990505637277868 1.5990505637277868 1.5990505637277868 + metric 1.5990505637277868 1.5990505637277868 1.5990505637277868 # Unsupported by streaming engine. # eval instant at 1m stdvar_over_time(metric[1m]) -# {} 0 +# {} 0 # Unsupported by streaming engine. # eval instant at 1m stddev_over_time(metric[1m]) -# {} 0 +# {} 0 # Tests for mad_over_time. clear load 10s - metric 4 6 2 1 999 1 2 + metric 4 6 2 1 999 1 2 + metric_histogram{type="only_histogram"} {{schema:1 sum:2 count:3}}x5 + metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}} # Unsupported by streaming engine. # eval instant at 70s mad_over_time(metric[70s]) -# {} 1 +# {} 1 + +# Unsupported by streaming engine. +# eval instant at 70s mad_over_time(metric_histogram{type="only_histogram"}[70s]) +# #empty + +# Unsupported by streaming engine. +# eval_info instant at 70s mad_over_time(metric_histogram{type="mix"}[70s]) +# {type="mix"} 0 # Tests for quantile_over_time clear @@ -1018,6 +1047,8 @@ load 10s data{test="two samples"} 0 1 data{test="three samples"} 0 1 2 data{test="uneven samples"} 0 1 4 + data_histogram{test="only histogram samples"} {{schema:0 sum:1 count:2}}x4 + data_histogram{test="mix samples"} 0 1 2 {{schema:0 sum:1 count:2}}x2 # Unsupported by streaming engine. # eval instant at 1m quantile_over_time(0, data[2m]) @@ -1067,6 +1098,14 @@ load 10s # {test="three samples"} +Inf # {test="uneven samples"} +Inf +# Unsupported by streaming engine. +# eval instant at 1m quantile_over_time(0.5, data_histogram{test="only histogram samples"}[2m]) +# #empty + +# Unsupported by streaming engine. +# eval_info instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m]) +# {test="mix samples"} 1 + clear # Test time-related functions. 
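# Note: the cases above follow one convention for mixed-type ranges:
# min/max/mad/stddev/stdvar/quantile_over_time yield no result when a range
# contains only histogram samples, and ignore the histograms (emitting an info
# annotation, asserted with eval_info) when floats and histograms are mixed,
# while count_over_time and last_over_time treat histogram samples as ordinary
# samples.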
@@ -1212,15 +1251,17 @@ load 5m eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m]) -# Tests for *_over_time clear +# Tests for *_over_time load 10s data{type="numbers"} 2 0 3 data{type="some_nan"} 2 0 NaN data{type="some_nan2"} 2 NaN 1 data{type="some_nan3"} NaN 0 1 data{type="only_nan"} NaN NaN NaN + data_histogram{type="only_histogram"} {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} {{schema:0 sum:3 count:4}} + data_histogram{type="mix_samples"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} eval instant at 1m min_over_time(data[2m]) {type="numbers"} 0 @@ -1229,6 +1270,12 @@ eval instant at 1m min_over_time(data[2m]) {type="some_nan3"} 0 {type="only_nan"} NaN +eval instant at 1m min_over_time(data_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m]) + {type="mix_samples"} 0 + eval instant at 1m max_over_time(data[2m]) {type="numbers"} 3 {type="some_nan"} 2 @@ -1236,12 +1283,29 @@ eval instant at 1m max_over_time(data[2m]) {type="some_nan3"} 1 {type="only_nan"} NaN -eval instant at 1m last_over_time(data[2m]) +eval instant at 1m max_over_time(data_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m]) + {type="mix_samples"} 1 + +eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m]) data{type="numbers"} 3 data{type="some_nan"} NaN data{type="some_nan2"} 1 data{type="some_nan3"} 1 data{type="only_nan"} NaN + data_histogram{type="only_histogram"} {{schema:0 sum:3 count:4}} + data_histogram{type="mix_samples"} {{schema:0 sum:2 count:3}} + +eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m]) + {type="numbers"} 3 + {type="some_nan"} 3 + {type="some_nan2"} 3 + {type="some_nan3"} 3 + {type="only_nan"} 3 + {type="only_histogram"} 3 + {type="mix_samples"} 4 clear diff --git a/pkg/streamingpromql/testdata/upstream/native_histograms.test b/pkg/streamingpromql/testdata/upstream/native_histograms.test index 3616d455911..61123239cec 100644 --- a/pkg/streamingpromql/testdata/upstream/native_histograms.test +++ b/pkg/streamingpromql/testdata/upstream/native_histograms.test @@ -1133,6 +1133,42 @@ eval_warn range from 0 to 12m step 6m sum(metric) eval_warn range from 0 to 12m step 6m avg(metric) {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _ +# Test incompatible schemas with additional aggregation operators +eval range from 0 to 12m step 6m count(metric) + {} 2 2 3 + +eval range from 0 to 12m step 6m group(metric) + {} 1 1 1 + +# Unsupported by streaming engine. +# eval range from 0 to 12m step 6m count(limitk(1, metric)) +# {} 1 1 1 + +# Unsupported by streaming engine. +# eval range from 0 to 12m step 6m limitk(3, metric) +# metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} +# metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} +# metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +# Unsupported by streaming engine. 
+# eval range from 0 to 12m step 6m limit_ratio(1, metric) +# metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} +# metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} +# metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +# Test incompatible schemas with and/or +eval range from 0 to 12m step 6m metric{series="1"} and ignoring(series) metric{series="2"} + metric{series="1"} _ _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +eval range from 0 to 12m step 6m metric{series="1"} or ignoring(series) metric{series="2"} + metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ _ + +# Test incompatible schemas with arithmetic binary operators +eval_warn range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"} + +eval_warn range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"} + clear load 1m diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 5f31a3db180..2d809571d4c 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -691,9 +691,15 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod // === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { + samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { values := make(vectorByValueHeap, 0, len(s.Floats)) for _, f := range s.Floats { @@ -705,18 +711,20 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode values = append(values, Sample{F: math.Abs(f.F - median)}) } return quantile(0.5, values) - }), nil + }), annos } // === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. max_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. 
+ samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { maxVal := s.Floats[0].F for _, f := range s.Floats { @@ -725,18 +733,20 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return maxVal - }), nil + }), annos } // === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. min_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. + samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { minVal := s.Floats[0].F for _, f := range s.Floats { @@ -745,7 +755,7 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode } } return minVal - }), nil + }), annos } // === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === @@ -794,10 +804,6 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva q := vals[0].(Vector)[0].F el := vals[1].(Matrix)[0] if len(el.Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. quantile_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. return enh.Out, nil } @@ -805,7 +811,10 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva if math.IsNaN(q) || q < 0 || q > 1 { annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange())) } - + if len(el.Histograms) > 0 { + metricName := el.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInAggregationInfo(metricName, args[0].PositionRange())) + } values := make(vectorByValueHeap, 0, len(el.Floats)) for _, f := range el.Floats { values = append(values, Sample{F: f.F}) @@ -815,13 +824,15 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva // === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. stddev_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. 
+ samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 @@ -833,18 +844,20 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return math.Sqrt((aux + cAux) / count) - }), nil + }), annos } // === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - if len(vals[0].(Matrix)[0].Floats) == 0 { - // TODO(beorn7): The passed values only contain - // histograms. stdvar_over_time ignores histograms for now. If - // there are only histograms, we have to return without adding - // anything to enh.Out. + samples := vals[0].(Matrix)[0] + var annos annotations.Annotations + if len(samples.Floats) == 0 { return enh.Out, nil } + if len(samples.Histograms) > 0 { + metricName := samples.Metric.Get(labels.MetricName) + annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange())) + } return aggrOverTime(vals, enh, func(s Series) float64 { var count float64 var mean, cMean float64 @@ -856,7 +869,7 @@ func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux) } return (aux + cAux) / count - }), nil + }), annos } // === absent(Vector parser.ValueTypeVector) (Vector, Annotations) === diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md b/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md index af343542412..25c2653ab3b 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/README.md @@ -22,7 +22,7 @@ Each test file contains a series of commands. There are three kinds of commands: * `load` * `clear` -* `eval` +* `eval` (including the variants `eval_fail`, `eval_warn`, `eval_info`, and `eval_ordered`) Each command is executed in the order given in the file. @@ -50,12 +50,12 @@ load 1m my_metric{env="prod"} 5 2+3x2 _ stale {{schema:1 sum:3 count:22 buckets:[5 10 7]}} ``` -...will create a single series with labels `my_metric{env="prod"}`, with the following points: +… will create a single series with labels `my_metric{env="prod"}`, with the following points: * t=0: value is 5 * t=1m: value is 2 * t=2m: value is 5 -* t=3m: value is 7 +* t=3m: value is 8 * t=4m: no point * t=5m: stale marker * t=6m: native histogram with schema 1, sum -3, count 22 and bucket counts 5, 10 and 7 @@ -74,6 +74,7 @@ When loading a batch of classic histogram float series, you can optionally appen ## `eval` command `eval` runs a query against the test environment and asserts that the result is as expected. +It requires the query to succeed without any (info or warn) annotations. Both instant and range queries are supported. @@ -110,11 +111,18 @@ eval range from 0 to 3m step 1m sum by (env) (my_metric) {env="test"} 10 20 30 45 ``` -Instant queries also support asserting that the series are returned in exactly the order specified: use `eval_ordered instant ...` instead of `eval instant ...`. 
-This is not supported for range queries. +To assert that a query succeeds with an info or warn annotation, use the +`eval_info` or `eval_warn` commands, respectively. -It is also possible to test that queries fail: use `eval_fail instant ...` or `eval_fail range ...`. -`eval_fail` optionally takes an expected error message string or regexp to assert that the error message is as expected. +Instant queries also support asserting that the series are returned in exactly +the order specified: use `eval_ordered instant ...` instead of `eval instant +...`. `eval_ordered` ignores any annotations. The assertion always fails for +matrix results. + +To assert that a query fails, use the `eval_fail` command. `eval_fail` does not +expect any result lines. Instead, it optionally accepts an expected error +message string or regular expression to assert that the error message is as +expected. For example: diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go index efa2136f10a..5e0d9083cbf 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/almost" + "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/convertnhcb" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" @@ -692,6 +693,24 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc ev.expected[h] = entry{pos: pos, vals: vals} } +// checkAnnotations asserts if the annotations match the expectations. +func (ev *evalCmd) checkAnnotations(expr string, annos annotations.Annotations) error { + countWarnings, countInfo := annos.CountWarningsAndInfo() + switch { + case ev.ordered: + // Ignore annotations if testing for order. + case !ev.warn && countWarnings > 0: + return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors()) + case ev.warn && countWarnings == 0: + return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", expr, ev.line) + case !ev.info && countInfo > 0: + return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors()) + case ev.info && countInfo == 0: + return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", expr, ev.line) + } + return nil +} + // compareResult compares the result value with the defined expectation. 
func (ev *evalCmd) compareResult(result parser.Value) error { switch val := result.(type) { @@ -1131,6 +1150,7 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { if err != nil { return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) } + defer q.Close() res := q.Exec(t.context) if res.Err != nil { if cmd.fail { @@ -1142,18 +1162,9 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line) } - countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() - switch { - case !cmd.warn && countWarnings > 0: - return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) - case cmd.warn && countWarnings == 0: - return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line) - case !cmd.info && countInfo > 0: - return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) - case cmd.info && countInfo == 0: - return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", cmd.expr, cmd.line) + if err := cmd.checkAnnotations(cmd.expr, res.Warnings); err != nil { + return err } - defer q.Close() if err := cmd.compareResult(res.Value); err != nil { return fmt.Errorf("error in %s %s (line %d): %w", cmd, cmd.expr, cmd.line, err) @@ -1196,16 +1207,8 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } - countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() - switch { - case !cmd.warn && countWarnings > 0: - return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) - case cmd.warn && countWarnings == 0: - return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) - case !cmd.info && countInfo > 0: - return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) - case cmd.info && countInfo == 0: - return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line) + if err := cmd.checkAnnotations(iq.expr, res.Warnings); err != nil { + return err } err = cmd.compareResult(res.Value) if err != nil { @@ -1218,11 +1221,11 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if err != nil { return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) } + defer q.Close() rangeRes := q.Exec(t.context) if rangeRes.Err != nil { return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err) } - defer q.Close() if cmd.ordered { // Range queries are always sorted by labels, so skip this test case that expects results in a particular order. return nil @@ -1416,8 +1419,8 @@ func (ll *LazyLoader) appendTill(ts int64) error { // WithSamplesTill loads the samples till given timestamp and executes the given function. 
func (ll *LazyLoader) WithSamplesTill(ts time.Time, fn func(error)) { - tsMilli := ts.Sub(time.Unix(0, 0).UTC()) / time.Millisecond - fn(ll.appendTill(int64(tsMilli))) + till := ts.Sub(time.Unix(0, 0).UTC()) / time.Millisecond + fn(ll.appendTill(int64(till))) } // QueryEngine returns the LazyLoader's query engine. diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test index 6d2ade3abc4..7fc636450fc 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test @@ -929,35 +929,58 @@ eval instant at 1m avg_over_time(metric[2m]) # Tests for stddev_over_time and stdvar_over_time. clear load 10s - metric 0 8 8 2 3 + metric 0 8 8 2 3 + metric_histogram{type="only_histogram"} {{schema:1 sum:2 count:3}}x5 + metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}} eval instant at 1m stdvar_over_time(metric[2m]) - {} 10.56 + {} 10.56 eval instant at 1m stddev_over_time(metric[2m]) - {} 3.249615 + {} 3.249615 eval instant at 1m stddev_over_time((metric[2m])) - {} 3.249615 + {} 3.249615 + +# Tests for stddev_over_time and stdvar_over_time with histograms. +eval instant at 1m stddev_over_time(metric_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m]) + {type="mix"} 0 + +eval instant at 1m stdvar_over_time(metric_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m]) + {type="mix"} 0 # Tests for stddev_over_time and stdvar_over_time #4927. clear load 10s - metric 1.5990505637277868 1.5990505637277868 1.5990505637277868 + metric 1.5990505637277868 1.5990505637277868 1.5990505637277868 eval instant at 1m stdvar_over_time(metric[1m]) - {} 0 + {} 0 eval instant at 1m stddev_over_time(metric[1m]) - {} 0 + {} 0 # Tests for mad_over_time. clear load 10s - metric 4 6 2 1 999 1 2 + metric 4 6 2 1 999 1 2 + metric_histogram{type="only_histogram"} {{schema:1 sum:2 count:3}}x5 + metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}} eval instant at 70s mad_over_time(metric[70s]) - {} 1 + {} 1 + +eval instant at 70s mad_over_time(metric_histogram{type="only_histogram"}[70s]) + #empty + +eval_info instant at 70s mad_over_time(metric_histogram{type="mix"}[70s]) + {type="mix"} 0 # Tests for quantile_over_time clear @@ -966,6 +989,8 @@ load 10s data{test="two samples"} 0 1 data{test="three samples"} 0 1 2 data{test="uneven samples"} 0 1 4 + data_histogram{test="only histogram samples"} {{schema:0 sum:1 count:2}}x4 + data_histogram{test="mix samples"} 0 1 2 {{schema:0 sum:1 count:2}}x2 eval instant at 1m quantile_over_time(0, data[2m]) {test="two samples"} 0 @@ -1007,6 +1032,12 @@ eval_warn instant at 1m (quantile_over_time(2, (data[2m]))) {test="three samples"} +Inf {test="uneven samples"} +Inf +eval instant at 1m quantile_over_time(0.5, data_histogram{test="only histogram samples"}[2m]) + #empty + +eval_info instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m]) + {test="mix samples"} 1 + clear # Test time-related functions. 
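The fixture hunks above can also be replayed from Go through the promqltest harness documented earlier in this patch. A minimal sketch, assuming promqltest.RunTest keeps its usual (t, script, engine) shape and leaving engine construction to the caller; the helper name runMixedRangeScript is hypothetical:

package example

import (
	"testing"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/promqltest"
)

// runMixedRangeScript replays one of the fixture cases above. The engine is
// injected so the same script can exercise more than one engine.
func runMixedRangeScript(t *testing.T, engine promql.QueryEngine) {
	promqltest.RunTest(t, `
load 10s
    data_histogram{type="mix_samples"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}}

# eval_info passes only if the query succeeds AND emits an info annotation,
# here the new "histogram ignored in mixed range" one.
eval_info instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m])
    {type="mix_samples"} 0
`, engine)
}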
@@ -1120,15 +1151,17 @@ load 5m eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m]) -# Tests for *_over_time clear +# Tests for *_over_time load 10s data{type="numbers"} 2 0 3 data{type="some_nan"} 2 0 NaN data{type="some_nan2"} 2 NaN 1 data{type="some_nan3"} NaN 0 1 data{type="only_nan"} NaN NaN NaN + data_histogram{type="only_histogram"} {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} {{schema:0 sum:3 count:4}} + data_histogram{type="mix_samples"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} eval instant at 1m min_over_time(data[2m]) {type="numbers"} 0 @@ -1137,6 +1170,12 @@ eval instant at 1m min_over_time(data[2m]) {type="some_nan3"} 0 {type="only_nan"} NaN +eval instant at 1m min_over_time(data_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m]) + {type="mix_samples"} 0 + eval instant at 1m max_over_time(data[2m]) {type="numbers"} 3 {type="some_nan"} 2 @@ -1144,12 +1183,29 @@ eval instant at 1m max_over_time(data[2m]) {type="some_nan3"} 1 {type="only_nan"} NaN -eval instant at 1m last_over_time(data[2m]) +eval instant at 1m max_over_time(data_histogram{type="only_histogram"}[2m]) + #empty + +eval_info instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m]) + {type="mix_samples"} 1 + +eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m]) data{type="numbers"} 3 data{type="some_nan"} NaN data{type="some_nan2"} 1 data{type="some_nan3"} 1 data{type="only_nan"} NaN + data_histogram{type="only_histogram"} {{schema:0 sum:3 count:4}} + data_histogram{type="mix_samples"} {{schema:0 sum:2 count:3}} + +eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m]) + {type="numbers"} 3 + {type="some_nan"} 3 + {type="some_nan2"} 3 + {type="some_nan3"} 3 + {type="only_nan"} 3 + {type="only_histogram"} 3 + {type="mix_samples"} 4 clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test index 6be298cf7d6..414619d5cde 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test @@ -1128,6 +1128,39 @@ eval_warn range from 0 to 12m step 6m sum(metric) eval_warn range from 0 to 12m step 6m avg(metric) {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _ +# Test incompatible schemas with additional aggregation operators +eval range from 0 to 12m step 6m count(metric) + {} 2 2 3 + +eval range from 0 to 12m step 6m group(metric) + {} 1 1 1 + +eval range from 0 to 12m step 6m count(limitk(1, metric)) + {} 1 1 1 + +eval range from 0 to 12m step 6m limitk(3, metric) + metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +eval range from 0 to 12m step 6m limit_ratio(1, metric) + metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + 
metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +# Test incompatible schemas with and/or +eval range from 0 to 12m step 6m metric{series="1"} and ignoring(series) metric{series="2"} + metric{series="1"} _ _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +eval range from 0 to 12m step 6m metric{series="1"} or ignoring(series) metric{series="2"} + metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ _ + +# Test incompatible schemas with arithmetic binary operators +eval_warn range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"} + +eval_warn range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"} + clear load 1m diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index 4f40788e27c..ec498c2f5fd 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -143,8 +143,9 @@ type AlertingRule struct { logger *slog.Logger - noDependentRules *atomic.Bool - noDependencyRules *atomic.Bool + dependenciesMutex sync.RWMutex + dependentRules []Rule + dependencyRules []Rule } // NewAlertingRule constructs a new AlertingRule. @@ -171,8 +172,6 @@ func NewAlertingRule( evaluationTimestamp: atomic.NewTime(time.Time{}), evaluationDuration: atomic.NewDuration(0), lastError: atomic.NewError(nil), - noDependentRules: atomic.NewBool(false), - noDependencyRules: atomic.NewBool(false), } } @@ -316,20 +315,54 @@ func (r *AlertingRule) Restored() bool { return r.restored.Load() } -func (r *AlertingRule) SetNoDependentRules(noDependentRules bool) { - r.noDependentRules.Store(noDependentRules) +func (r *AlertingRule) SetDependentRules(dependents []Rule) { + r.dependenciesMutex.Lock() + defer r.dependenciesMutex.Unlock() + + r.dependentRules = make([]Rule, len(dependents)) + copy(r.dependentRules, dependents) } func (r *AlertingRule) NoDependentRules() bool { - return r.noDependentRules.Load() + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + + if r.dependentRules == nil { + return false // We don't know if there are dependent rules. + } + + return len(r.dependentRules) == 0 +} + +func (r *AlertingRule) DependentRules() []Rule { + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + return r.dependentRules } -func (r *AlertingRule) SetNoDependencyRules(noDependencyRules bool) { - r.noDependencyRules.Store(noDependencyRules) +func (r *AlertingRule) SetDependencyRules(dependencies []Rule) { + r.dependenciesMutex.Lock() + defer r.dependenciesMutex.Unlock() + + r.dependencyRules = make([]Rule, len(dependencies)) + copy(r.dependencyRules, dependencies) } func (r *AlertingRule) NoDependencyRules() bool { - return r.noDependencyRules.Load() + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + + if r.dependencyRules == nil { + return false // We don't know if there are dependency rules. 
+ } + + return len(r.dependencyRules) == 0 +} + +func (r *AlertingRule) DependencyRules() []Rule { + r.dependenciesMutex.RLock() + defer r.dependenciesMutex.RUnlock() + return r.dependencyRules } // resolvedRetention is the duration for which a resolved alert instance diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go index 0965dc27636..3d1bdb22ef9 100644 --- a/vendor/github.com/prometheus/prometheus/rules/group.go +++ b/vendor/github.com/prometheus/prometheus/rules/group.go @@ -75,8 +75,6 @@ type Group struct { // defaults to DefaultEvalIterationFunc. evalIterationFunc GroupEvalIterationFunc - // concurrencyController controls the rules evaluation concurrency. - concurrencyController RuleConcurrencyController appOpts *storage.AppendOptions alignEvaluationTimeOnInterval bool } @@ -130,11 +128,6 @@ func NewGroup(o GroupOptions) *Group { evalIterationFunc = DefaultEvalIterationFunc } - concurrencyController := opts.RuleConcurrencyController - if concurrencyController == nil { - concurrencyController = sequentialRuleEvalController{} - } - if opts.Logger == nil { opts.Logger = promslog.NewNopLogger() } @@ -156,7 +149,6 @@ func NewGroup(o GroupOptions) *Group { logger: opts.Logger.With("file", o.File, "group", o.Name), metrics: metrics, evalIterationFunc: evalIterationFunc, - concurrencyController: concurrencyController, appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, alignEvaluationTimeOnInterval: o.AlignEvaluationTimeOnInterval, } @@ -659,29 +651,51 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { } var wg sync.WaitGroup - for i, rule := range g.rules { - select { - case <-g.done: - // There's a chance that the group is asked to return early. In that case, we should - // wait for any in-flight rules to finish evaluating before returning so that we can preserve the same semantics. - // At the time of writing, the main reason for this was to make sure we don't clear seriesInPreviousEval before we're done using it. - wg.Wait() - return - default: - } - - if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) { - wg.Add(1) + ctrl := g.opts.RuleConcurrencyController + if ctrl == nil { + ctrl = sequentialRuleEvalController{} + } - go eval(i, rule, func() { - wg.Done() - ctrl.Done(ctx) - }) - } else { + batches := ctrl.SplitGroupIntoBatches(ctx, g) + if len(batches) == 0 { + // Sequential evaluation when batches aren't set. + // This is the behaviour without a defined RuleConcurrencyController + for i, rule := range g.rules { + // Check if the group has been stopped. + select { + case <-g.done: + return + default: + } eval(i, rule, nil) } + } else { + // Concurrent evaluation. + for _, batch := range batches { + for _, ruleIndex := range batch { + // Check if the group has been stopped. + select { + case <-g.done: + wg.Wait() + return + default: + } + rule := g.rules[ruleIndex] + if len(batch) > 1 && ctrl.Allow(ctx, g, rule) { + wg.Add(1) + + go eval(ruleIndex, rule, func() { + wg.Done() + ctrl.Done(ctx) + }) + } else { + eval(ruleIndex, rule, nil) + } + } + // It is important that we finish processing any rules in this current batch - before we move into the next one. + wg.Wait() + } } - wg.Wait() g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal.Load()) g.cleanupStaleSeries(ctx, ts) @@ -1076,27 +1090,25 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics { // output metric produced by another rule in its expression (i.e. as its "input"). 
 type dependencyMap map[Rule][]Rule

-// dependents returns the count of rules which use the output of the given rule as one of their inputs.
-func (m dependencyMap) dependents(r Rule) int {
-	return len(m[r])
+// dependents returns the rules which use the output of the given rule as one of their inputs.
+func (m dependencyMap) dependents(r Rule) []Rule {
+	return m[r]
 }

-// dependencies returns the count of rules on which the given rule is dependent for input.
-func (m dependencyMap) dependencies(r Rule) int {
+// dependencies returns the rules on which the given rule is dependent for input.
+func (m dependencyMap) dependencies(r Rule) []Rule {
 	if len(m) == 0 {
-		return 0
+		return []Rule{}
 	}

-	var count int
-	for _, children := range m {
-		for _, child := range children {
-			if child == r {
-				count++
-			}
+	var dependencies []Rule
+	for rule, dependents := range m {
+		if slices.Contains(dependents, r) {
+			dependencies = append(dependencies, rule)
 		}
 	}

-	return count
+	return dependencies
 }

 // isIndependent determines whether the given rule is not dependent on another rule for its input, nor is any other rule
@@ -1106,7 +1118,7 @@ func (m dependencyMap) isIndependent(r Rule) bool {
 		return false
 	}

-	return m.dependents(r)+m.dependencies(r) == 0
+	return len(m.dependents(r)) == 0 && len(m.dependencies(r)) == 0
 }

 // buildDependencyMap builds a data-structure which contains the relationships between rules within a group.
diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go
index 58020126e52..62cf6258101 100644
--- a/vendor/github.com/prometheus/prometheus/rules/manager.go
+++ b/vendor/github.com/prometheus/prometheus/rules/manager.go
@@ -473,8 +473,8 @@ func SendAlerts(s Sender, externalURL string) NotifyFunc {
 // RuleDependencyController controls whether a set of rules have dependencies between each other.
 type RuleDependencyController interface {
 	// AnalyseRules analyses dependencies between the input rules. For each rule that it's guaranteed
-	// not having any dependants and/or dependency, this function should call Rule.SetNoDependentRules(true)
-	// and/or Rule.SetNoDependencyRules(true).
+	// not having any dependants and/or dependency, this function should call Rule.SetDependentRules(...)
+	// and/or Rule.SetDependencyRules(...).
 	AnalyseRules(rules []Rule)
 }

@@ -489,15 +489,22 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) {
 	}

 	for _, r := range rules {
-		r.SetNoDependentRules(depMap.dependents(r) == 0)
-		r.SetNoDependencyRules(depMap.dependencies(r) == 0)
+		r.SetDependentRules(depMap.dependents(r))
+		r.SetDependencyRules(depMap.dependencies(r))
 	}
 }

+// ConcurrentRules represents a slice of indexes of rules that can be evaluated concurrently.
+type ConcurrentRules []int
+
 // RuleConcurrencyController controls concurrency for rules that are safe to be evaluated concurrently.
 // Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus
 // server with additional query load. Concurrency is controlled globally, not on a per-group basis.
 type RuleConcurrencyController interface {
+	// SplitGroupIntoBatches returns an ordered slice of ConcurrentRules, which are batches of rules that can be evaluated concurrently.
+	// The rules are represented by their index from the input rule group.
+	SplitGroupIntoBatches(ctx context.Context, group *Group) []ConcurrentRules
+
 	// Allow determines if the given rule is allowed to be evaluated concurrently.
// If Allow() returns true, then Done() must be called to release the acquired slot and corresponding cleanup is done. // It is important that both *Group and Rule are not retained and only be used for the duration of the call. @@ -519,21 +526,51 @@ func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyControlle } func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool { - // To allow a rule to be executed concurrently, we need 3 conditions: - // 1. The rule must not have any rules that depend on it. - // 2. The rule itself must not depend on any other rules. - // 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot. - if rule.NoDependentRules() && rule.NoDependencyRules() { - return c.sema.TryAcquire(1) + return c.sema.TryAcquire(1) +} + +func (c *concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { + // Using the rule dependency controller information (rules being identified as having no dependencies or no dependants), + // we can safely run the following concurrent groups: + // 1. Concurrently, all rules that have no dependencies + // 2. Sequentially, all rules that have both dependencies and dependants + // 3. Concurrently, all rules that have no dependants + + var noDependencies []int + var dependenciesAndDependants []int + var noDependants []int + + for i, r := range g.rules { + switch { + case r.NoDependencyRules(): + noDependencies = append(noDependencies, i) + case !r.NoDependentRules() && !r.NoDependencyRules(): + dependenciesAndDependants = append(dependenciesAndDependants, i) + case r.NoDependentRules(): + noDependants = append(noDependants, i) + } } - return false + var order []ConcurrentRules + if len(noDependencies) > 0 { + order = append(order, noDependencies) + } + for _, r := range dependenciesAndDependants { + order = append(order, []int{r}) + } + if len(noDependants) > 0 { + order = append(order, noDependants) + } + + return order } func (c *concurrentRuleEvalController) Done(_ context.Context) { c.sema.Release(1) } +var _ RuleConcurrencyController = &sequentialRuleEvalController{} + // sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially. type sequentialRuleEvalController struct{} @@ -541,6 +578,10 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) return false } +func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules { + return nil +} + func (c sequentialRuleEvalController) Done(_ context.Context) {} // FromMaps returns new sorted Labels from the given maps, overriding each other in order. diff --git a/vendor/github.com/prometheus/prometheus/rules/recording.go b/vendor/github.com/prometheus/prometheus/rules/recording.go index 52c2a875ab5..3b6db210af1 100644 --- a/vendor/github.com/prometheus/prometheus/rules/recording.go +++ b/vendor/github.com/prometheus/prometheus/rules/recording.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "net/url" + "sync" "time" "go.uber.org/atomic" @@ -43,8 +44,9 @@ type RecordingRule struct { // Duration of how long it took to evaluate the recording rule. evaluationDuration *atomic.Duration - noDependentRules *atomic.Bool - noDependencyRules *atomic.Bool + dependenciesMutex sync.RWMutex + dependentRules []Rule + dependencyRules []Rule } // NewRecordingRule returns a new recording rule. 
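To make the batch ordering in SplitGroupIntoBatches above concrete: rules that feed others run first as one concurrent batch, rules that both consume and produce run one at a time, and rules nothing depends on run last as one concurrent batch. A self-contained sketch using simplified, hypothetical stand-in types rather than the vendored Rule interface:

package main

import "fmt"

// rule is a simplified stand-in for rules.Rule, tracking only the two facts
// the dependency analysis provides.
type rule struct {
	name           string
	noDependencies bool // depends on no other rule's output
	noDependents   bool // no other rule depends on its output
}

// splitIntoBatches mirrors the ordering logic above: one concurrent batch of
// dependency-free rules, one sequential batch per rule that is both producer
// and consumer, then one concurrent batch of rules with no dependents.
func splitIntoBatches(rs []rule) [][]int {
	var noDeps, middle, leaves []int
	for i, r := range rs {
		switch {
		case r.noDependencies:
			noDeps = append(noDeps, i)
		case !r.noDependents && !r.noDependencies:
			middle = append(middle, i)
		case r.noDependents:
			leaves = append(leaves, i)
		}
	}
	var order [][]int
	if len(noDeps) > 0 {
		order = append(order, noDeps)
	}
	for _, i := range middle {
		order = append(order, []int{i})
	}
	if len(leaves) > 0 {
		order = append(order, leaves)
	}
	return order
}

func main() {
	rs := []rule{
		{"raw_a", true, false},  // no dependencies: first batch
		{"raw_b", true, false},  // no dependencies: first batch
		{"mid", false, false},   // consumes and produces: its own batch
		{"leaf_a", false, true}, // no dependents: last batch
		{"leaf_b", false, true}, // no dependents: last batch
	}
	fmt.Println(splitIntoBatches(rs)) // [[0 1] [2] [3 4]]
}

In the real controller, the semaphore behind Allow additionally caps how many rules of a concurrent batch run at once, and the wg.Wait() in Group.Eval drains each batch before the next one begins.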
@@ -57,8 +59,6 @@ func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *Reco evaluationTimestamp: atomic.NewTime(time.Time{}), evaluationDuration: atomic.NewDuration(0), lastError: atomic.NewError(nil), - noDependentRules: atomic.NewBool(false), - noDependencyRules: atomic.NewBool(false), } } @@ -172,18 +172,52 @@ func (rule *RecordingRule) GetEvaluationTimestamp() time.Time { return rule.evaluationTimestamp.Load() } -func (rule *RecordingRule) SetNoDependentRules(noDependentRules bool) { - rule.noDependentRules.Store(noDependentRules) +func (rule *RecordingRule) SetDependentRules(dependents []Rule) { + rule.dependenciesMutex.Lock() + defer rule.dependenciesMutex.Unlock() + + rule.dependentRules = make([]Rule, len(dependents)) + copy(rule.dependentRules, dependents) } func (rule *RecordingRule) NoDependentRules() bool { - return rule.noDependentRules.Load() + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + + if rule.dependentRules == nil { + return false // We don't know if there are dependent rules. + } + + return len(rule.dependentRules) == 0 +} + +func (rule *RecordingRule) DependentRules() []Rule { + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + return rule.dependentRules } -func (rule *RecordingRule) SetNoDependencyRules(noDependencyRules bool) { - rule.noDependencyRules.Store(noDependencyRules) +func (rule *RecordingRule) SetDependencyRules(dependencies []Rule) { + rule.dependenciesMutex.Lock() + defer rule.dependenciesMutex.Unlock() + + rule.dependencyRules = make([]Rule, len(dependencies)) + copy(rule.dependencyRules, dependencies) } func (rule *RecordingRule) NoDependencyRules() bool { - return rule.noDependencyRules.Load() + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + + if rule.dependencyRules == nil { + return false // We don't know if there are dependency rules. + } + + return len(rule.dependencyRules) == 0 +} + +func (rule *RecordingRule) DependencyRules() []Rule { + rule.dependenciesMutex.RLock() + defer rule.dependenciesMutex.RUnlock() + return rule.dependencyRules } diff --git a/vendor/github.com/prometheus/prometheus/rules/rule.go b/vendor/github.com/prometheus/prometheus/rules/rule.go index 687c03d000d..33f1755ac52 100644 --- a/vendor/github.com/prometheus/prometheus/rules/rule.go +++ b/vendor/github.com/prometheus/prometheus/rules/rule.go @@ -62,19 +62,25 @@ type Rule interface { // NOTE: Used dynamically by rules.html template. GetEvaluationTimestamp() time.Time - // SetNoDependentRules sets whether there's no other rule in the rule group that depends on this rule. - SetNoDependentRules(bool) + // SetDependentRules sets rules which depend on the output of this rule. + SetDependentRules(rules []Rule) // NoDependentRules returns true if it's guaranteed that in the rule group there's no other rule // which depends on this one. In case this function returns false there's no such guarantee, which // means there may or may not be other rules depending on this one. NoDependentRules() bool - // SetNoDependencyRules sets whether this rule doesn't depend on the output of any rule in the rule group. - SetNoDependencyRules(bool) + // DependentRules returns the rules which depend on the output of this rule. + DependentRules() []Rule + + // SetDependencyRules sets rules on which this rule depends. + SetDependencyRules(rules []Rule) // NoDependencyRules returns true if it's guaranteed that this rule doesn't depend on the output of // any other rule in the group. 
In case this function returns false there's no such guarantee, which // means the rule may or may not depend on other rules. NoDependencyRules() bool + + // DependencyRules returns the rules on which this rule depends. + DependencyRules() []Rule } diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index d05866f8630..22cde01c055 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -295,12 +295,12 @@ func (t *Target) intervalAndTimeout(defaultInterval, defaultDuration time.Durati intervalLabel := t.labels.Get(model.ScrapeIntervalLabel) interval, err := model.ParseDuration(intervalLabel) if err != nil { - return defaultInterval, defaultDuration, fmt.Errorf("Error parsing interval label %q: %w", intervalLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("error parsing interval label %q: %w", intervalLabel, err) } timeoutLabel := t.labels.Get(model.ScrapeTimeoutLabel) timeout, err := model.ParseDuration(timeoutLabel) if err != nil { - return defaultInterval, defaultDuration, fmt.Errorf("Error parsing timeout label %q: %w", timeoutLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("error parsing timeout label %q: %w", timeoutLabel, err) } return time.Duration(interval), time.Duration(timeout), nil diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go index 9306dcb4c28..d7f376c96a8 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go @@ -38,7 +38,7 @@ type Watchable interface { type noopScrapeManager struct{} func (noop *noopScrapeManager) Get() (*scrape.Manager, error) { - return nil, errors.New("Scrape manager not ready") + return nil, errors.New("scrape manager not ready") } // MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo. diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 475c126eff3..4b966059f65 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -2119,7 +2119,7 @@ func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed [] } return compressed, nil default: - return compressed, fmt.Errorf("Unknown compression scheme [%v]", enc) + return compressed, fmt.Errorf("unknown compression scheme [%v]", enc) } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 18447f0b6fc..6b1cf835027 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -863,6 +863,11 @@ func (it *ListPostings) Err() error { return nil } +// Len returns the remaining number of postings in the list. +func (it *ListPostings) Len() int { + return len(it.list) +} + // bigEndianPostings implements the Postings interface over a byte stream of // big endian numbers. 
type bigEndianPostings struct { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index 6f1bc1df35a..ca74a9ceafc 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -679,7 +679,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err // Ensure we read the whole contents of every segment in the checkpoint dir. segs, err := listSegments(checkpointDir) if err != nil { - return fmt.Errorf("Unable to get segments checkpoint dir: %w", err) + return fmt.Errorf("unable to get segments checkpoint dir: %w", err) } for _, segRef := range segs { size, err := getSegmentSize(checkpointDir, segRef.index) diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index caba3900f55..5861ac23f19 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -144,6 +144,8 @@ type PrometheusVersion struct { type RuntimeInfo struct { StartTime time.Time `json:"startTime"` CWD string `json:"CWD"` + Hostname string `json:"hostname"` + ServerTime time.Time `json:"serverTime"` ReloadConfigSuccess bool `json:"reloadConfigSuccess"` LastConfigTime time.Time `json:"lastConfigTime"` CorruptionCount int64 `json:"corruptionCount"` @@ -437,6 +439,10 @@ func (api *API) options(*http.Request) apiFuncResult { } func (api *API) query(r *http.Request) (result apiFuncResult) { + limit, err := parseLimitParam(r.FormValue("limit")) + if err != nil { + return invalidParamError(err, "limit") + } ts, err := parseTimeParam(r, "time", api.now()) if err != nil { return invalidParamError(err, "time") @@ -478,6 +484,15 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close} } + warnings := res.Warnings + if limit > 0 { + var isTruncated bool + + res, isTruncated = truncateResults(res, limit) + if isTruncated { + warnings = warnings.Add(errors.New("results truncated due to limit")) + } + } // Optional stats field in response if parameter "stats" is not empty. sr := api.statsRenderer if sr == nil { @@ -489,7 +504,7 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { ResultType: res.Value.Type(), Result: res.Value, Stats: qs, - }, nil, res.Warnings, qry.Close} + }, nil, warnings, qry.Close} } func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { @@ -525,6 +540,10 @@ func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { } func (api *API) queryRange(r *http.Request) (result apiFuncResult) { + limit, err := parseLimitParam(r.FormValue("limit")) + if err != nil { + return invalidParamError(err, "limit") + } start, err := parseTime(r.FormValue("start")) if err != nil { return invalidParamError(err, "start") @@ -589,6 +608,16 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { return apiFuncResult{nil, returnAPIError(res.Err), res.Warnings, qry.Close} } + warnings := res.Warnings + if limit > 0 { + var isTruncated bool + + res, isTruncated = truncateResults(res, limit) + if isTruncated { + warnings = warnings.Add(errors.New("results truncated due to limit")) + } + } + // Optional stats field in response if parameter "stats" is not empty. 
sr := api.statsRenderer if sr == nil { @@ -600,7 +629,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { ResultType: res.Value.Type(), Result: res.Value, Stats: qs, - }, nil, res.Warnings, qry.Close} + }, nil, warnings, qry.Close} } func (api *API) queryExemplars(r *http.Request) apiFuncResult { @@ -2015,7 +2044,7 @@ func parseTimeParam(r *http.Request, paramName string, defaultValue time.Time) ( } result, err := parseTime(val) if err != nil { - return time.Time{}, fmt.Errorf("Invalid time value for '%s': %w", paramName, err) + return time.Time{}, fmt.Errorf("invalid time value for '%s': %w", paramName, err) } return result, nil } @@ -2101,3 +2130,25 @@ func toHintLimit(limit int) int { } return limit } + +// truncateResults truncates result for queryRange() and query(). +// No truncation for other types(Scalars or Strings). +func truncateResults(result *promql.Result, limit int) (*promql.Result, bool) { + isTruncated := false + + switch v := result.Value.(type) { + case promql.Matrix: + if len(v) > limit { + result.Value = v[:limit] + isTruncated = true + } + case promql.Vector: + if len(v) > limit { + result.Value = v[:limit] + isTruncated = true + } + } + + // Return the modified result. Unchanged for other types. + return result, isTruncated +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0f989a32b49..809e3eacc42 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1040,7 +1040,7 @@ github.com/prometheus/exporter-toolkit/web github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203 +# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20250110020350-a1e2bcf4a615 ## explicit; go 1.22.7 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1704,7 +1704,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20250109135143-114aaaadc203 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20250110020350-a1e2bcf4a615 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b From 5c97a5d9b571765f97fde827e315294ca90897d3 Mon Sep 17 00:00:00 2001 From: Jack Baldry Date: Fri, 10 Jan 2025 19:10:42 +0000 Subject: [PATCH 10/18] Fix only run on fork guard (#10402) The previous guard fails because `github.repository` resolves to the base repository on `pull_request` events. --- .github/workflows/deploy-pr-preview.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-pr-preview.yml b/.github/workflows/deploy-pr-preview.yml index 92eda749b74..ea6db81daa1 100644 --- a/.github/workflows/deploy-pr-preview.yml +++ b/.github/workflows/deploy-pr-preview.yml @@ -11,7 +11,7 @@ on: jobs: deploy-pr-preview: - if: github.repository == 'grafana/mimir' + if: ! 
github.event.pull_request.head.repo.fork uses: grafana/writers-toolkit/.github/workflows/deploy-preview.yml@main with: sha: ${{ github.event.pull_request.head.sha }} From ed3160e9d40d0f78973de20e9b8cf1dbd5f32b84 Mon Sep 17 00:00:00 2001 From: Julien Duchesne Date: Fri, 10 Jan 2025 14:21:12 -0500 Subject: [PATCH 11/18] Rule Concurrency: Fix debug log (#10409) --- pkg/ruler/rule_concurrency.go | 13 +++- pkg/ruler/rule_concurrency_test.go | 103 +++++++++++++++++++---------- 2 files changed, 80 insertions(+), 36 deletions(-) diff --git a/pkg/ruler/rule_concurrency.go b/pkg/ruler/rule_concurrency.go index 3b26f20e953..09e721ff088 100644 --- a/pkg/ruler/rule_concurrency.go +++ b/pkg/ruler/rule_concurrency.go @@ -4,6 +4,7 @@ package ruler import ( "context" + "fmt" "sync" "github.com/go-kit/log" @@ -178,6 +179,16 @@ func (c *TenantConcurrencyController) Allow(_ context.Context, _ *rules.Group, _ return false } +// stringableConcurrentRules is a type that allows us to print a slice of rules.ConcurrentRules. +// This prevents premature evaluation, it will only be evaluated when the logger needs to print it. +type stringableConcurrentRules []rules.ConcurrentRules + +func (p stringableConcurrentRules) String() string { + return fmt.Sprintf("%v", []rules.ConcurrentRules(p)) +} + +var _ fmt.Stringer = stringableConcurrentRules{} + // SplitGroupIntoBatches splits the group into batches of rules that can be evaluated concurrently. // It tries to batch rules that have no dependencies together and rules that have dependencies in separate batches. // Returning no batches or nil means that the group should be evaluated sequentially. @@ -251,7 +262,7 @@ func (c *TenantConcurrencyController) SplitGroupIntoBatches(_ context.Context, g } level.Info(logger).Log("msg", "Batched rules into concurrent blocks", "rules", len(g.Rules()), "batches", len(result)) - level.Debug(logger).Log("msg", "Batched rules into concurrent blocks", "batches", result) + level.Debug(logger).Log("msg", "Batched rules into concurrent blocks", "batches", stringableConcurrentRules(result)) return result } diff --git a/pkg/ruler/rule_concurrency_test.go b/pkg/ruler/rule_concurrency_test.go index e7c485db30a..36a88c1d306 100644 --- a/pkg/ruler/rule_concurrency_test.go +++ b/pkg/ruler/rule_concurrency_test.go @@ -183,6 +183,40 @@ cortex_ruler_independent_rule_evaluation_concurrency_attempts_completed_total{us require.True(t, user3ControllerTwo.Allow(ctx, rg, rule1)) } +var splitToBatchesTestCases = map[string]struct { + inputFile string + expectedGroups []rules.ConcurrentRules +}{ + "chained": { + inputFile: "fixtures/rules_chain.yaml", + expectedGroups: []rules.ConcurrentRules{ + {0, 1}, + {2}, + {3, 4}, + {5, 6}, + }, + }, + "indeterminates": { + inputFile: "fixtures/rules_indeterminates.yaml", + expectedGroups: nil, + }, + "all independent": { + inputFile: "fixtures/rules_multiple_independent.yaml", + expectedGroups: []rules.ConcurrentRules{ + {0, 1, 2, 3, 4, 5}, + }, + }, + "topological sort": { + inputFile: "fixtures/rules_topological_sort_needed.json", + expectedGroups: []rules.ConcurrentRules{ + {0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 37, 38, 58}, + {1, 2, 5, 6, 9, 10, 13, 14, 17, 18, 21, 22, 25, 26, 29, 30, 33, 34, 39, 40, 41, 42, 45, 46, 51, 52, 55, 56}, + {3, 7, 11, 15, 19, 23, 27, 31, 35}, + {43, 44, 47, 48, 49, 50, 53, 54, 57}, + }, + }, +} + func TestSplitGroupIntoBatches(t *testing.T) { limits := validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) { tenantLimits["user1"] = 
validation.MockDefaultLimits() @@ -196,41 +230,7 @@ func TestSplitGroupIntoBatches(t *testing.T) { RuleConcurrencyController: controller, }) - tests := map[string]struct { - inputFile string - expectedGroups []rules.ConcurrentRules - }{ - "chained": { - inputFile: "fixtures/rules_chain.yaml", - expectedGroups: []rules.ConcurrentRules{ - {0, 1}, - {2}, - {3, 4}, - {5, 6}, - }, - }, - "indeterminates": { - inputFile: "fixtures/rules_indeterminates.yaml", - expectedGroups: nil, - }, - "all independent": { - inputFile: "fixtures/rules_multiple_independent.yaml", - expectedGroups: []rules.ConcurrentRules{ - {0, 1, 2, 3, 4, 5}, - }, - }, - "topological sort": { - inputFile: "fixtures/rules_topological_sort_needed.json", - expectedGroups: []rules.ConcurrentRules{ - {0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 37, 38, 58}, - {1, 2, 5, 6, 9, 10, 13, 14, 17, 18, 21, 22, 25, 26, 29, 30, 33, 34, 39, 40, 41, 42, 45, 46, 51, 52, 55, 56}, - {3, 7, 11, 15, 19, 23, 27, 31, 35}, - {43, 44, 47, 48, 49, 50, 53, 54, 57}, - }, - }, - } - - for name, tc := range tests { + for name, tc := range splitToBatchesTestCases { t.Run(name, func(t *testing.T) { // Load group with a -1 interval so it's always at risk. groups, errs := ruleManager.LoadGroups(-1*time.Second, labels.EmptyLabels(), "", nil, []string{tc.inputFile}...) @@ -252,6 +252,39 @@ func TestSplitGroupIntoBatches(t *testing.T) { } } +func BenchmarkSplitGroupIntoBatches(b *testing.B) { + limits := validation.MockOverrides(func(_ *validation.Limits, tenantLimits map[string]*validation.Limits) { + tenantLimits["user1"] = validation.MockDefaultLimits() + tenantLimits["user1"].RulerMaxIndependentRuleEvaluationConcurrencyPerTenant = 2 + }) + + mtController := NewMultiTenantConcurrencyController(log.NewNopLogger(), 3, 50.0, prometheus.NewPedanticRegistry(), limits) + controller := mtController.NewTenantConcurrencyControllerFor("user1") + + ruleManager := rules.NewManager(&rules.ManagerOptions{ + RuleConcurrencyController: controller, + }) + + for name, tc := range splitToBatchesTestCases { + b.Run(name, func(b *testing.B) { + // Load group with a -1 interval so it's always at risk. + groups, errs := ruleManager.LoadGroups(-1*time.Second, labels.EmptyLabels(), "", nil, []string{tc.inputFile}...) + require.Empty(b, errs) + require.Len(b, groups, 1) + + var group *rules.Group + for _, g := range groups { + group = g + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = controller.SplitGroupIntoBatches(context.Background(), group) + } + }) + } +} + func requireConcurrentRulesEqual(t *testing.T, expected, actual []rules.ConcurrentRules) { t.Helper() From ccb73ed7daab58b9fd0a481110b388293bfcc577 Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Mon, 13 Jan 2025 15:06:42 +1100 Subject: [PATCH 12/18] MQE: Tidy up pool size handling (#10372) * MQE: Tidy up pool size handling Primarily from feedback on https://github.com/grafana/mimir/pull/10261 Rather than allowing an arbitrary maxSize in pools, require them to be a power of two since that is what happens internally anyway. We keep the checks and protections around requiring it to be a power of two. 
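The power-of-two requirement referenced in this commit is what lets the ring buffers use a bitmask instead of a modulo, and it is also the capacity a bucketed pool hands back anyway. A small sketch of both halves of the invariant; this is illustrative only, not the vendored pool code, though the requested-to-capacity pairs match the test table later in this patch:

package main

import (
	"fmt"
	"math/bits"
)

// isPowerOfTwo matches the check used throughout this patch: a power of two
// has a single bit set, so n&(n-1) must be zero. (Like the vendored check,
// this also accepts 0.)
func isPowerOfTwo(n int) bool {
	return (n & (n - 1)) == 0
}

// nextPowerOfTwo rounds a requested size up to the capacity a power-of-two
// bucketed pool would actually return.
func nextPowerOfTwo(n int) int {
	if n <= 1 {
		return 1
	}
	return 1 << bits.Len(uint(n-1))
}

func main() {
	for _, requested := range []int{3, 5, 10, 65000, 100001} {
		got := nextPowerOfTwo(requested)
		fmt.Printf("requested %6d -> capacity %6d (power of two: %v)\n",
			requested, got, isPowerOfTwo(got))
	}
}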
* Fix lint * add comment --- .../types/fpoint_ring_buffer.go | 9 +- .../types/hpoint_ring_buffer.go | 5 +- pkg/streamingpromql/types/limiting_pool.go | 4 +- .../types/limiting_pool_test.go | 97 +------------------ pkg/streamingpromql/types/pool.go | 4 +- pkg/streamingpromql/types/pool_test.go | 17 ++++ pkg/streamingpromql/types/ring_buffer_test.go | 29 +----- pkg/util/pool/bucketed_pool.go | 16 ++- pkg/util/pool/bucketed_pool_test.go | 69 ++++++------- 9 files changed, 76 insertions(+), 174 deletions(-) create mode 100644 pkg/streamingpromql/types/pool_test.go diff --git a/pkg/streamingpromql/types/fpoint_ring_buffer.go b/pkg/streamingpromql/types/fpoint_ring_buffer.go index 53dbd1d1387..2303e7d5568 100644 --- a/pkg/streamingpromql/types/fpoint_ring_buffer.go +++ b/pkg/streamingpromql/types/fpoint_ring_buffer.go @@ -8,6 +8,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/grafana/mimir/pkg/streamingpromql/limiting" + "github.com/grafana/mimir/pkg/util/pool" ) // FPointRingBuffer and HPointRingBuffer are nearly identical, but exist for each @@ -61,7 +62,7 @@ func (b *FPointRingBuffer) Append(p promql.FPoint) error { return err } - if !isPowerOfTwo(cap(newSlice)) { + if !pool.IsPowerOfTwo(cap(newSlice)) { // We rely on the capacity being a power of two for the pointsIndexMask optimisation below. // If we can guarantee that newSlice has a capacity that is a power of two in the future, then we can drop this check. return fmt.Errorf("pool returned slice of capacity %v (requested %v), but wanted a power of two", cap(newSlice), newSize) @@ -148,7 +149,7 @@ func (b *FPointRingBuffer) Release() { // should not return s to the pool themselves. // s must have a capacity that is a power of two. func (b *FPointRingBuffer) Use(s []promql.FPoint) error { - if !isPowerOfTwo(cap(s)) { + if !pool.IsPowerOfTwo(cap(s)) { // We rely on the capacity being a power of two for the pointsIndexMask optimisation below. return fmt.Errorf("slice capacity must be a power of two, but is %v", cap(s)) } @@ -261,7 +262,3 @@ func (v FPointRingBufferView) Any() bool { // These hooks exist so we can override them during unit tests. var getFPointSliceForRingBuffer = FPointSlicePool.Get var putFPointSliceForRingBuffer = FPointSlicePool.Put - -func isPowerOfTwo(n int) bool { - return (n & (n - 1)) == 0 -} diff --git a/pkg/streamingpromql/types/hpoint_ring_buffer.go b/pkg/streamingpromql/types/hpoint_ring_buffer.go index a34805d399b..f5b49b6193f 100644 --- a/pkg/streamingpromql/types/hpoint_ring_buffer.go +++ b/pkg/streamingpromql/types/hpoint_ring_buffer.go @@ -8,6 +8,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/grafana/mimir/pkg/streamingpromql/limiting" + "github.com/grafana/mimir/pkg/util/pool" ) // FPointRingBuffer and HPointRingBuffer are nearly identical, but exist for each @@ -121,7 +122,7 @@ func (b *HPointRingBuffer) NextPoint() (*promql.HPoint, error) { return nil, err } - if !isPowerOfTwo(cap(newSlice)) { + if !pool.IsPowerOfTwo(cap(newSlice)) { // We rely on the capacity being a power of two for the pointsIndexMask optimisation below. // If we can guarantee that newSlice has a capacity that is a power of two in the future, then we can drop this check. return nil, fmt.Errorf("pool returned slice of capacity %v (requested %v), but wanted a power of two", cap(newSlice), newSize) @@ -185,7 +186,7 @@ func (b *HPointRingBuffer) Release() { // should not return s to the pool themselves. // s must have a capacity that is a power of two. 
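// As an illustrative aside (a sketch of the optimisation, not code from this
// patch): the pointsIndexMask optimisation that the power-of-two requirement
// protects replaces a modulo with a bitwise AND. With a hypothetical
// capacity of 8:
//
//	mask := cap(points) - 1          // 7, i.e. 0b0111
//	slot := (firstIndex + i) & mask  // same as (firstIndex+i) % cap(points), but cheaper
//
// The equivalence only holds when cap(points) is a power of two, which is
// why Use and the pool's Get path reject other capacities.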
func (b *HPointRingBuffer) Use(s []promql.HPoint) error { - if !isPowerOfTwo(cap(s)) { + if !pool.IsPowerOfTwo(cap(s)) { // We rely on the capacity being a power of two for the pointsIndexMask optimisation below. return fmt.Errorf("slice capacity must be a power of two, but is %v", cap(s)) } diff --git a/pkg/streamingpromql/types/limiting_pool.go b/pkg/streamingpromql/types/limiting_pool.go index 2a614f4b798..6e2a01af8e8 100644 --- a/pkg/streamingpromql/types/limiting_pool.go +++ b/pkg/streamingpromql/types/limiting_pool.go @@ -13,7 +13,9 @@ import ( ) const ( - MaxExpectedPointsPerSeries = 131_072 // There's not too much science behind this number: 100,000 points allows for a point per minute for just under 70 days. Then we use the next power of two. + // There's not too much science behind this number: 100,000 points allows for a point per minute for just under 70 days. + // Then we use the next power of two, given the pools always return slices with capacity equal to a power of two. + MaxExpectedPointsPerSeries = 131_072 // Treat a native histogram sample as equivalent to this many float samples when considering max in-memory bytes limit. // Keep in mind that float sample = timestamp + float value, so 5x this is equivalent to five timestamps and five floats. diff --git a/pkg/streamingpromql/types/limiting_pool_test.go b/pkg/streamingpromql/types/limiting_pool_test.go index d8340b392b4..94212fe1581 100644 --- a/pkg/streamingpromql/types/limiting_pool_test.go +++ b/pkg/streamingpromql/types/limiting_pool_test.go @@ -25,7 +25,7 @@ func TestLimitingBucketedPool_Unlimited(t *testing.T) { tracker := limiting.NewMemoryConsumptionTracker(0, metric) p := NewLimitingBucketedPool( - pool.NewBucketedPool(1000, func(size int) []promql.FPoint { return make([]promql.FPoint, 0, size) }), + pool.NewBucketedPool(1024, func(size int) []promql.FPoint { return make([]promql.FPoint, 0, size) }), FPointSize, false, nil, @@ -78,7 +78,7 @@ func TestLimitingPool_Limited(t *testing.T) { tracker := limiting.NewMemoryConsumptionTracker(limit, metric) p := NewLimitingBucketedPool( - pool.NewBucketedPool(1000, func(size int) []promql.FPoint { return make([]promql.FPoint, 0, size) }), + pool.NewBucketedPool(1024, func(size int) []promql.FPoint { return make([]promql.FPoint, 0, size) }), FPointSize, false, nil, @@ -203,7 +203,7 @@ func TestLimitingPool_Mangling(t *testing.T) { tracker := limiting.NewMemoryConsumptionTracker(0, metric) p := NewLimitingBucketedPool( - pool.NewBucketedPool(1000, func(size int) []int { return make([]int, 0, size) }), + pool.NewBucketedPool(1024, func(size int) []int { return make([]int, 0, size) }), 1, false, func(_ int) int { return 123 }, @@ -228,99 +228,10 @@ func TestLimitingPool_Mangling(t *testing.T) { require.Equal(t, []int{123, 123, 123, 123}, s, "returned slice should be mangled when mangling is enabled") } -func TestLimitingBucketedPool_PowerOfTwoCapacities(t *testing.T) { - memoryConsumptionTracker := limiting.NewMemoryConsumptionTracker(0, nil) - - pool := NewLimitingBucketedPool( - pool.NewBucketedPool(100_000, func(size int) []int { return make([]int, 0, size) }), - 1, - false, - nil, - ) - - cases := []struct { - requestedSize int - expectedCap int - }{ - {3, 4}, - {5, 8}, - {10, 16}, - {65_000, 65_536}, - {100_001, 131_072}, // Exceeds max, expect next power of two - } - - for _, c := range cases { - slice, err := pool.Get(c.requestedSize, memoryConsumptionTracker) - require.NoError(t, err, "Unexpected error when requesting size %d", c.requestedSize) - require.Equal(t, 
c.expectedCap, cap(slice), - "LimitingBucketedPool.Get() returned slice with capacity %d; expected %d", cap(slice), c.expectedCap) - pool.Put(slice, memoryConsumptionTracker) - } -} - -func TestLimitingBucketedPool_UnreasonableSizeRequest(t *testing.T) { - const maxMemoryLimit = 1_000_000 * FPointSize - - reg, metric := createRejectedMetric() - memoryConsumptionTracker := limiting.NewMemoryConsumptionTracker(uint64(maxMemoryLimit), metric) - - pool := NewLimitingBucketedPool( - pool.NewBucketedPool(100_000, func(size int) []int { return make([]int, 0, size) }), - 1, - false, - nil, - ) - - // Request a reasonable size - slice, err := pool.Get(500_000, memoryConsumptionTracker) - require.NoError(t, err, "Expected to succeed for reasonable size request") - require.Equal(t, 524_288, cap(slice), "Capacity should be next power of two") - assertRejectedQueryCount(t, reg, 0) - - pool.Put(slice, memoryConsumptionTracker) - - // Request an unreasonable size - _, err = pool.Get(10_000_000, memoryConsumptionTracker) - require.Error(t, err, "Expected an error for unreasonably large size request") - require.Contains(t, err.Error(), "exceeded", "Error message should indicate memory consumption limit exceeded") - assertRejectedQueryCount(t, reg, 1) - - require.Equal(t, uint64(0), memoryConsumptionTracker.CurrentEstimatedMemoryConsumptionBytes, - "Current memory consumption should remain at 0 after rejected request") -} - func TestLimitingBucketedPool_MaxExpectedPointsPerSeriesConstantIsPowerOfTwo(t *testing.T) { // Although not strictly required (as the code should handle MaxExpectedPointsPerSeries not being a power of two correctly), // it is best that we keep it as one for now. - require.True(t, isPowerOfTwo(MaxExpectedPointsPerSeries), "MaxExpectedPointsPerSeries must be a power of two") -} - -func TestIsPowerOfTwo(t *testing.T) { - cases := []struct { - input int - expected bool - }{ - {-2, false}, - {1, true}, - {2, true}, - {3, false}, - {4, true}, - {5, false}, - {6, false}, - {7, false}, - {8, true}, - {16, true}, - {32, true}, - {1023, false}, - {1024, true}, - {1<<12 - 1, false}, - {1 << 12, true}, - } - - for _, c := range cases { - result := isPowerOfTwo(c.input) - require.Equalf(t, c.expected, result, "isPowerOfTwo(%d) should return %v", c.input, c.expected) - } + require.True(t, pool.IsPowerOfTwo(MaxExpectedPointsPerSeries), "MaxExpectedPointsPerSeries must be a power of two") } func assertRejectedQueryCount(t *testing.T, reg *prometheus.Registry, expectedRejectionCount int) { diff --git a/pkg/streamingpromql/types/pool.go b/pkg/streamingpromql/types/pool.go index 4734524d81f..7eed11885a8 100644 --- a/pkg/streamingpromql/types/pool.go +++ b/pkg/streamingpromql/types/pool.go @@ -9,7 +9,9 @@ import ( ) const ( - maxExpectedSeriesPerResult = 10_000_000 // There's not too much science behind this number: this is the based on examining the largest queries seen at Grafana Labs. + // There's not too much science behind this number: this is based on examining the largest queries seen at Grafana Labs. + // The number must also align with a power of two for our pools.
+ maxExpectedSeriesPerResult = 8_388_608 ) var ( diff --git a/pkg/streamingpromql/types/pool_test.go b/pkg/streamingpromql/types/pool_test.go new file mode 100644 index 00000000000..fa5242f3b3d --- /dev/null +++ b/pkg/streamingpromql/types/pool_test.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/mimir/pkg/util/pool" +) + +func TestMaxExpectedSeriesPerResultConstantIsPowerOfTwo(t *testing.T) { + // Although not strictly required (as the code should handle maxExpectedSeriesPerResult not being a power of two correctly), + // it is best that we keep it as one for now. + require.True(t, pool.IsPowerOfTwo(maxExpectedSeriesPerResult), "maxExpectedSeriesPerResult must be a power of two") +} diff --git a/pkg/streamingpromql/types/ring_buffer_test.go b/pkg/streamingpromql/types/ring_buffer_test.go index 87b44f4487b..cb42d45ac65 100644 --- a/pkg/streamingpromql/types/ring_buffer_test.go +++ b/pkg/streamingpromql/types/ring_buffer_test.go @@ -135,6 +135,11 @@ func testRingBuffer[T any](t *testing.T, buf ringBuffer[T], points []T) { err = buf.Use(subsliceWithPowerOfTwoCapacity) require.NoError(t, err) shouldHavePoints(t, buf, points[4:]...) + + nonPowerOfTwoSlice := make([]T, 0, 15) + err = buf.Use(nonPowerOfTwoSlice) + require.EqualError(t, err, "slice capacity must be a power of two, but is 15", + "Error message should indicate the invalid capacity") } func TestRingBuffer_DiscardPointsBefore_ThroughWrapAround(t *testing.T) { @@ -502,27 +507,3 @@ func setupRingBufferTestingPools(t *testing.T) { putHPointSliceForRingBuffer = originalPutHPointSlice }) } - -func TestFPointRingBuffer_UseReturnsErrorOnNonPowerOfTwoSlice(t *testing.T) { - memoryConsumptionTracker := limiting.NewMemoryConsumptionTracker(0, nil) - buf := NewFPointRingBuffer(memoryConsumptionTracker) - - nonPowerOfTwoSlice := make([]promql.FPoint, 0, 15) - - err := buf.Use(nonPowerOfTwoSlice) - require.Error(t, err, "Use() should return an error for a non-power-of-two slice") - require.EqualError(t, err, "slice capacity must be a power of two, but is 15", - "Error message should indicate the invalid capacity") -} - -func TestHPointRingBuffer_UseReturnsErrorOnNonPowerOfTwoSlice(t *testing.T) { - memoryConsumptionTracker := limiting.NewMemoryConsumptionTracker(0, nil) - buf := NewHPointRingBuffer(memoryConsumptionTracker) - - nonPowerOfTwoSlice := make([]promql.HPoint, 0, 15) - - err := buf.Use(nonPowerOfTwoSlice) - require.Error(t, err, "Use() should return an error for a non-power-of-two slice") - require.EqualError(t, err, "slice capacity must be a power of two, but is 15", - "Error message should indicate the invalid capacity") -} diff --git a/pkg/util/pool/bucketed_pool.go b/pkg/util/pool/bucketed_pool.go index 37a01f096b9..8bb382b95e9 100644 --- a/pkg/util/pool/bucketed_pool.go +++ b/pkg/util/pool/bucketed_pool.go @@ -28,6 +28,8 @@ type BucketedPool[T ~[]E, E any] struct { func NewBucketedPool[T ~[]E, E any](maxSize uint, makeFunc func(int) T) *BucketedPool[T, E] { if maxSize <= 1 { panic("invalid maximum pool size") + } else if !IsPowerOfTwo(int(maxSize)) { + panic("bucket maxSize is not a power of two") } bucketCount := bits.Len(maxSize) @@ -42,9 +44,8 @@ func NewBucketedPool[T ~[]E, E any](maxSize uint, makeFunc func(int) T) *Buckete } // Get returns a new slice with capacity greater than or equal to size. 
-// If no bucket large enough exists, a slice larger than the requested size -// of the next power of two is returned. -// Get guarantees the resulting slice always has a capacity in power of twos. +// The resulting slice always has a capacity that is a power of two. +// If size is greater than maxSize, then a slice is still returned, however it may not be drawn from a pool. func (p *BucketedPool[T, E]) Get(size int) T { if size < 0 { panic(fmt.Sprintf("BucketedPool.Get with negative size %v", size)) @@ -56,7 +57,7 @@ func (p *BucketedPool[T, E]) Get(size int) T { bucketIndex := bits.Len(uint(size - 1)) - // If bucketIndex exceeds the number of available buckets, return a slice of the next power of two. + // If the requested size is larger than the size of the largest bucket, return a slice of the next power of two greater than or equal to size. if bucketIndex >= len(p.buckets) { nextPowerOfTwo := 1 << bucketIndex return p.make(nextPowerOfTwo) @@ -83,10 +84,11 @@ func (p *BucketedPool[T, E]) Put(s T) { bucketIndex := bits.Len(size - 1) if bucketIndex >= len(p.buckets) { + // This should never happen as maxSize is checked above, and enforced to be a power of 2 return // Ignore slices larger than the largest bucket } - // Ignore slices that do not align to the current power of 2 + // Ignore slices with capacity that is not a power of 2 // (this will only happen where a slice did not originally come from the pool). if size != (1 << bucketIndex) { return @@ -94,3 +96,7 @@ func (p *BucketedPool[T, E]) Put(s T) { p.buckets[bucketIndex].Put(s[0:0]) } + +func IsPowerOfTwo(n int) bool { + return (n & (n - 1)) == 0 +} diff --git a/pkg/util/pool/bucketed_pool_test.go b/pkg/util/pool/bucketed_pool_test.go index a7183694d13..373bd430b3a 100644 --- a/pkg/util/pool/bucketed_pool_test.go +++ b/pkg/util/pool/bucketed_pool_test.go @@ -64,7 +64,7 @@ func TestBucketedPool_HappyPath(t *testing.T) { } runTests := func(t *testing.T, returnToPool bool) { - testPool := NewBucketedPool(19, makeFunc) + testPool := NewBucketedPool(16, makeFunc) for _, c := range cases { ret := testPool.Get(c.size) require.Equal(t, c.expectedCap, cap(ret)) @@ -91,7 +91,7 @@ func TestBucketedPool_HappyPath(t *testing.T) { } func TestBucketedPool_SliceNotAlignedToBuckets(t *testing.T) { - pool := NewBucketedPool(1000, makeFunc) + pool := NewBucketedPool(1024, makeFunc) pool.Put(make([]int, 0, 5)) s := pool.Get(6) require.Equal(t, 8, cap(s)) @@ -99,7 +99,7 @@ func TestBucketedPool_SliceNotAlignedToBuckets(t *testing.T) { } func TestBucketedPool_PutEmptySlice(t *testing.T) { - pool := NewBucketedPool(1000, makeFunc) + pool := NewBucketedPool(1024, makeFunc) pool.Put([]int{}) s := pool.Get(1) require.Equal(t, 1, cap(s)) @@ -107,7 +107,7 @@ func TestBucketedPool_PutEmptySlice(t *testing.T) { } func TestBucketedPool_PutNilSlice(t *testing.T) { - pool := NewBucketedPool(1000, makeFunc) + pool := NewBucketedPool(1024, makeFunc) pool.Put(nil) s := pool.Get(1) require.Equal(t, 1, cap(s)) @@ -115,7 +115,7 @@ func TestBucketedPool_PutNilSlice(t *testing.T) { } func TestBucketedPool_PutSliceLargerThanMaximum(t *testing.T) { - pool := NewBucketedPool(100, makeFunc) + pool := NewBucketedPool(64, makeFunc) s1 := make([]int, 101) pool.Put(s1) s2 := pool.Get(101)[:101] @@ -124,10 +124,10 @@ func TestBucketedPool_PutSliceLargerThanMaximum(t *testing.T) { } func TestBucketedPool_GetSizeCloseToMax(t *testing.T) { - maxSize := 100000 + maxSize := 131072 pool := NewBucketedPool(uint(maxSize), makeFunc) - // Request a size that triggers the last bucket 
boundary. + // Request a slice with size that will be drawn from the last bucket in the pool. s := pool.Get(86401) // Check that we still get a slice with the correct size. @@ -135,45 +135,30 @@ func TestBucketedPool_GetSizeCloseToMax(t *testing.T) { require.Len(t, s, 0) } -func TestBucketedPool_AlwaysReturnsPowerOfTwoCapacities(t *testing.T) { - pool := NewBucketedPool(100_000, makeFunc) - +func TestIsPowerOfTwo(t *testing.T) { cases := []struct { - requestedSize int - expectedCap int + input int + expected bool }{ - {3, 4}, - {5, 8}, - {10, 16}, - {20, 32}, - {65_000, 65_536}, - {100_001, 131_072}, // Exceeds max bucket: next power of two is 131,072 + {-2, false}, + {1, true}, + {2, true}, + {3, false}, + {4, true}, + {5, false}, + {6, false}, + {7, false}, + {8, true}, + {16, true}, + {32, true}, + {1023, false}, + {1024, true}, + {1<<12 - 1, false}, + {1 << 12, true}, } for _, c := range cases { - slice := pool.Get(c.requestedSize) - - require.Equal(t, c.expectedCap, cap(slice), - "BucketedPool.Get() returned slice with capacity %d; expected %d", cap(slice), c.expectedCap) - - pool.Put(slice) + result := IsPowerOfTwo(c.input) + require.Equalf(t, c.expected, result, "isPowerOfTwo(%d) should return %v", c.input, c.expected) } } - -func TestBucketedPool_PutSizeCloseToMax(t *testing.T) { - maxSize := 100000 - pool := NewBucketedPool(uint(maxSize), makeFunc) - - // Create a slice with capacity that triggers the upper edge case - s := make([]int, 0, 65_000) // 86401 is close to maxSize but not aligned to power of 2 - - // Ensure Put does not panic when adding this slice - require.NotPanics(t, func() { - pool.Put(s) - }, "Put should not panic for sizes close to maxSize") - - // Validate that a subsequent Get for a smaller size works fine - ret := pool.Get(1) - require.Equal(t, 1, cap(ret)) - require.Len(t, ret, 0) -} From e6653e7e251c20762de4dced4929837040e651a1 Mon Sep 17 00:00:00 2001 From: Charles Korn Date: Mon, 13 Jan 2025 16:34:02 +1100 Subject: [PATCH 13/18] Include more information in spans produced by read consistency round-tripper (#10412) * Include more information in spans produced by read consistency round-tripper * Add changelog entry --- CHANGELOG.md | 1 + pkg/frontend/querymiddleware/read_consistency.go | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fc5f0ffde44..7b9eecf1754 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ * [ENHANCEMENT] Dashboards: Add Query-Scheduler <-> Querier Inflight Requests row to Query Reads and Remote Ruler reads dashboards. #10290 * [ENHANCEMENT] OTLP: In addition to the flag `-distributor.otel-created-timestamp-zero-ingestion-enabled` there is now `-distributor.otel-start-time-quiet-zero` to convert OTel start timestamps to Prometheus QuietZeroNaNs. This flag is to make the change rollout safe between Ingesters and Distributors. #10238 * [ENHANCEMENT] Ruler: When rule concurrency is enabled for a rule group, its rules will now be reordered and run in batches based on their dependencies. This increases the number of rules that can potentially run concurrently. Note that the global and tenant-specific limits still apply #10400 +* [ENHANCEMENT] Query-frontend: include more information about read consistency in trace spans produced when using experimental ingest storage. #10412 * [BUGFIX] Distributor: Use a boolean to track changes while merging the ReplicaDesc components, rather than comparing the objects directly. 
#10185 * [BUGFIX] Querier: fix timeout responding to query-frontend when response size is very close to `-querier.frontend-client.grpc-max-send-msg-size`. #10154 * [BUGFIX] Query-frontend and querier: show warning/info annotations in some cases where they were missing (if a lazy querier was used). #10277 diff --git a/pkg/frontend/querymiddleware/read_consistency.go b/pkg/frontend/querymiddleware/read_consistency.go index 9c15ae0e025..1523c8617be 100644 --- a/pkg/frontend/querymiddleware/read_consistency.go +++ b/pkg/frontend/querymiddleware/read_consistency.go @@ -58,6 +58,7 @@ func (r *readConsistencyRoundTripper) RoundTrip(req *http.Request) (_ *http.Resp } if level != querierapi.ReadConsistencyStrong { + spanLog.DebugLog("msg", "evaluating query with eventually consistent read consistency") return r.next.RoundTrip(req) } @@ -76,10 +77,13 @@ func (r *readConsistencyRoundTripper) RoundTrip(req *http.Request) (_ *http.Resp return errors.Wrapf(err, "wait for last produced offsets of topic '%s'", offsetsReader.Topic()) } + headerValue := string(querierapi.EncodeOffsets(offsets)) reqHeaderLock.Lock() - req.Header.Add(headerKey, string(querierapi.EncodeOffsets(offsets))) + req.Header.Add(headerKey, headerValue) reqHeaderLock.Unlock() + spanLog.DebugLog("msg", "got offsets for strong read consistency", "header", headerKey, "value", headerValue) + return nil }) } @@ -88,6 +92,8 @@ func (r *readConsistencyRoundTripper) RoundTrip(req *http.Request) (_ *http.Resp return nil, err } + spanLog.DebugLog("msg", "evaluating query with strong read consistency") + return r.next.RoundTrip(req) } From 633eea15332785ff7a08c92fb8430d21790a0fbc Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 08:09:49 +0100 Subject: [PATCH 14/18] chore(deps): update alpine docker tag to v3.21.2 (#10417) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- cmd/metaconvert/Dockerfile | 2 +- cmd/mimir/Dockerfile.alpine | 2 +- cmd/mimir/Dockerfile.continuous-test | 2 +- cmd/mimirtool/Dockerfile | 2 +- cmd/query-tee/Dockerfile | 2 +- development/mimir-ingest-storage/dev.dockerfile | 2 +- development/mimir-microservices-mode/dev.dockerfile | 2 +- .../mimir-monolithic-mode-with-swift-storage/dev.dockerfile | 2 +- development/mimir-monolithic-mode/dev.dockerfile | 2 +- development/mimir-read-write-mode/dev.dockerfile | 2 +- tools/copyblocks/Dockerfile | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cmd/metaconvert/Dockerfile b/cmd/metaconvert/Dockerfile index 67358ebb282..0dbea42eca9 100644 --- a/cmd/metaconvert/Dockerfile +++ b/cmd/metaconvert/Dockerfile @@ -3,7 +3,7 @@ # Provenance-includes-license: Apache-2.0 # Provenance-includes-copyright: The Cortex Authors. -FROM alpine:3.21.0 +FROM alpine:3.21.2 ARG EXTRA_PACKAGES RUN apk add --no-cache ca-certificates tzdata $EXTRA_PACKAGES # Expose TARGETOS and TARGETARCH variables. These are supported by Docker when using BuildKit, but must be "enabled" using ARG. diff --git a/cmd/mimir/Dockerfile.alpine b/cmd/mimir/Dockerfile.alpine index 156a419c827..351316dcbcc 100644 --- a/cmd/mimir/Dockerfile.alpine +++ b/cmd/mimir/Dockerfile.alpine @@ -3,7 +3,7 @@ # Provenance-includes-license: Apache-2.0 # Provenance-includes-copyright: The Cortex Authors. -FROM alpine:3.21.0 +FROM alpine:3.21.2 ARG EXTRA_PACKAGES RUN apk add --no-cache ca-certificates tzdata $EXTRA_PACKAGES # Expose TARGETOS and TARGETARCH variables. 
These are supported by Docker when using BuildKit, but must be "enabled" using ARG. diff --git a/cmd/mimir/Dockerfile.continuous-test b/cmd/mimir/Dockerfile.continuous-test index 65187bf7c3b..7207b78f440 100644 --- a/cmd/mimir/Dockerfile.continuous-test +++ b/cmd/mimir/Dockerfile.continuous-test @@ -1,6 +1,6 @@ # SPDX-License-Identifier: AGPL-3.0-only -FROM alpine:3.21.0 +FROM alpine:3.21.2 ARG EXTRA_PACKAGES RUN apk add --no-cache ca-certificates tzdata $EXTRA_PACKAGES # Expose TARGETOS and TARGETARCH variables. These are supported by Docker when using BuildKit, but must be "enabled" using ARG. diff --git a/cmd/mimirtool/Dockerfile b/cmd/mimirtool/Dockerfile index 73653218be3..794361ce6bd 100644 --- a/cmd/mimirtool/Dockerfile +++ b/cmd/mimirtool/Dockerfile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 -FROM alpine:3.21.0 +FROM alpine:3.21.2 ARG EXTRA_PACKAGES RUN apk add --no-cache ca-certificates tzdata $EXTRA_PACKAGES # Expose TARGETOS and TARGETARCH variables. These are supported by Docker when using BuildKit, but must be "enabled" using ARG. diff --git a/cmd/query-tee/Dockerfile b/cmd/query-tee/Dockerfile index 8584716ef13..ae7f4175445 100644 --- a/cmd/query-tee/Dockerfile +++ b/cmd/query-tee/Dockerfile @@ -3,7 +3,7 @@ # Provenance-includes-license: Apache-2.0 # Provenance-includes-copyright: The Cortex Authors. -FROM alpine:3.21.0 +FROM alpine:3.21.2 ARG EXTRA_PACKAGES RUN apk add --no-cache ca-certificates tzdata $EXTRA_PACKAGES # Expose TARGETOS and TARGETARCH variables. These are supported by Docker when using BuildKit, but must be "enabled" using ARG. diff --git a/development/mimir-ingest-storage/dev.dockerfile b/development/mimir-ingest-storage/dev.dockerfile index 2572deb6dab..320ee56e4d0 100644 --- a/development/mimir-ingest-storage/dev.dockerfile +++ b/development/mimir-ingest-storage/dev.dockerfile @@ -3,7 +3,7 @@ FROM $BUILD_IMAGE ENV CGO_ENABLED=0 RUN go install github.com/go-delve/delve/cmd/dlv@v1.22.0 -FROM alpine:3.21.0 +FROM alpine:3.21.2 RUN mkdir /mimir WORKDIR /mimir diff --git a/development/mimir-microservices-mode/dev.dockerfile b/development/mimir-microservices-mode/dev.dockerfile index 63eea13f8e6..ac1b3d3c3f9 100644 --- a/development/mimir-microservices-mode/dev.dockerfile +++ b/development/mimir-microservices-mode/dev.dockerfile @@ -3,7 +3,7 @@ FROM $BUILD_IMAGE ENV CGO_ENABLED=0 RUN go install github.com/go-delve/delve/cmd/dlv@v1.23.0 -FROM alpine:3.21.0 +FROM alpine:3.21.2 RUN mkdir /mimir WORKDIR /mimir diff --git a/development/mimir-monolithic-mode-with-swift-storage/dev.dockerfile b/development/mimir-monolithic-mode-with-swift-storage/dev.dockerfile index 56b8fccb452..046e4316d78 100644 --- a/development/mimir-monolithic-mode-with-swift-storage/dev.dockerfile +++ b/development/mimir-monolithic-mode-with-swift-storage/dev.dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.21.0 +FROM alpine:3.21.2 RUN mkdir /mimir WORKDIR /mimir diff --git a/development/mimir-monolithic-mode/dev.dockerfile b/development/mimir-monolithic-mode/dev.dockerfile index 8f5ba3f70ee..b42d07003be 100644 --- a/development/mimir-monolithic-mode/dev.dockerfile +++ b/development/mimir-monolithic-mode/dev.dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.21.0 +FROM alpine:3.21.2 RUN mkdir /mimir WORKDIR /mimir diff --git a/development/mimir-read-write-mode/dev.dockerfile b/development/mimir-read-write-mode/dev.dockerfile index 2572deb6dab..320ee56e4d0 100644 --- a/development/mimir-read-write-mode/dev.dockerfile +++ b/development/mimir-read-write-mode/dev.dockerfile @@ -3,7 +3,7 @@ FROM 
$BUILD_IMAGE ENV CGO_ENABLED=0 RUN go install github.com/go-delve/delve/cmd/dlv@v1.22.0 -FROM alpine:3.21.0 +FROM alpine:3.21.2 RUN mkdir /mimir WORKDIR /mimir diff --git a/tools/copyblocks/Dockerfile b/tools/copyblocks/Dockerfile index 2eb4721b704..d0b03e663f4 100644 --- a/tools/copyblocks/Dockerfile +++ b/tools/copyblocks/Dockerfile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: AGPL-3.0-only -FROM alpine:3.21.0 +FROM alpine:3.21.2 ARG EXTRA_PACKAGES RUN apk add --no-cache ca-certificates tzdata $EXTRA_PACKAGES # Expose TARGETOS and TARGETARCH variables. These are supported by Docker when using BuildKit, but must be "enabled" using ARG. From ecfbdf999a8d2159f5ef478c50db940e348c5887 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 07:37:57 +0000 Subject: [PATCH 15/18] Update google.golang.org/genproto/googleapis/rpc digest to 5f5ef82 (#10416) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index f2bd093067b..216bd94d416 100644 --- a/go.mod +++ b/go.mod @@ -281,7 +281,7 @@ require ( golang.org/x/tools v0.28.0 // indirect google.golang.org/genproto v0.0.0-20241113202542-65e8d215514f // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d + google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index 918a1ec5fb7..80d9217ec87 100644 --- a/go.sum +++ b/go.sum @@ -2525,8 +2525,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go. 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 h1:3UsHvIr4Wc2aW4brOaSCmcxh9ksica6fHEr8P1XhkYw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= diff --git a/vendor/modules.txt b/vendor/modules.txt index 809e3eacc42..a4ef5f7a251 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1504,7 +1504,7 @@ google.golang.org/genproto/googleapis/type/expr ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 ## explicit; go 1.22 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails From dee581e492bb4384a9c2a4cf1e34b0f0c2e242d2 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 08:45:05 +0100 Subject: [PATCH 16/18] fix(deps): update module google.golang.org/protobuf to v1.36.2 (#10419) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- .../protobuf/internal/impl/message_opaque.go | 24 ++++++++++++++++--- .../protobuf/internal/version/version.go | 2 +- vendor/modules.txt | 2 +- 5 files changed, 26 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 216bd94d416..ec9819a47b8 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( go.uber.org/multierr v1.11.0 golang.org/x/term v0.27.0 google.golang.org/api v0.213.0 - google.golang.org/protobuf v1.36.1 + google.golang.org/protobuf v1.36.2 sigs.k8s.io/kustomize/kyaml v0.18.1 ) diff --git a/go.sum b/go.sum index 80d9217ec87..1a82df69453 100644 --- a/go.sum +++ b/go.sum @@ -2551,8 +2551,8 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 
v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index d407dd791e8..d7ec53f074a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -88,9 +88,7 @@ func opaqueInitHook(mi *MessageInfo) bool { mi.oneofs = map[protoreflect.Name]*oneofInfo{} for i := 0; i < mi.Desc.Oneofs().Len(); i++ { od := mi.Desc.Oneofs().Get(i) - if !od.IsSynthetic() { - mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter) - } + mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter) } mi.denseFields = make([]*fieldInfo, fds.Len()*2) @@ -119,6 +117,26 @@ func opaqueInitHook(mi *MessageInfo) bool { return true } +func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fd := od.Fields().Get(0) + index, _ := presenceIndex(mi.Desc, fd) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + if !mi.present(p, index) { + return 0 + } + return od.Fields().Get(0).Number() + } + return oi + } + // Dispatch to non-opaque oneof implementation for non-synthetic oneofs. + return makeOneofInfo(od, si, x) +} + func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Map { diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 3018450df79..386c823aa64 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 1 + Patch = 2 PreRelease = "" ) diff --git a/vendor/modules.txt b/vendor/modules.txt index a4ef5f7a251..7f64a16e75f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1579,7 +1579,7 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap google.golang.org/grpc/test/bufconn -# google.golang.org/protobuf v1.36.1 +# google.golang.org/protobuf v1.36.2 ## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson From 62be9ae488ed10063297f9b40f8f808956e7cab7 Mon Sep 17 00:00:00 2001 From: "Grot (@grafanabot)" <43478413+grafanabot@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:26:58 +0100 Subject: [PATCH 17/18] Update mimir-distributed chart to 5.6.0-weekly.325 (#10421) Co-authored-by: grafanabot --- operations/helm/charts/mimir-distributed/Chart.yaml | 4 ++-- operations/helm/charts/mimir-distributed/README.md | 4 ++-- operations/helm/charts/mimir-distributed/values.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/operations/helm/charts/mimir-distributed/Chart.yaml b/operations/helm/charts/mimir-distributed/Chart.yaml index 0b8ca6afec7..3bfbb12bcf3 100644 --- a/operations/helm/charts/mimir-distributed/Chart.yaml +++ b/operations/helm/charts/mimir-distributed/Chart.yaml @@ 
-1,6 +1,6 @@ apiVersion: v2 -version: 5.6.0-weekly.324 -appVersion: r324 +version: 5.6.0-weekly.325 +appVersion: r325 description: "Grafana Mimir" home: https://grafana.com/docs/helm-charts/mimir-distributed/latest/ icon: https://grafana.com/static/img/logos/logo-mimir.svg diff --git a/operations/helm/charts/mimir-distributed/README.md b/operations/helm/charts/mimir-distributed/README.md index 418e9b9e23b..28552413a25 100644 --- a/operations/helm/charts/mimir-distributed/README.md +++ b/operations/helm/charts/mimir-distributed/README.md @@ -4,7 +4,7 @@ Helm chart for deploying [Grafana Mimir](https://grafana.com/docs/mimir/latest/) For the full documentation, visit [Grafana mimir-distributed Helm chart documentation](https://grafana.com/docs/helm-charts/mimir-distributed/latest/). -> **Note:** The documentation version is derived from the Helm chart version which is 5.6.0-weekly.324. +> **Note:** The documentation version is derived from the Helm chart version which is 5.6.0-weekly.325. When upgrading from Helm chart version 4.X, please see [Migrate the Helm chart from version 4.x to 5.0](https://grafana.com/docs/helm-charts/mimir-distributed/latest/migration-guides/migrate-helm-chart-4.x-to-5.0/). When upgrading from Helm chart version 3.x, please see [Migrate from single zone to zone-aware replication with Helm](https://grafana.com/docs/helm-charts/mimir-distributed/latest/migration-guides/migrate-from-single-zone-with-helm/). @@ -14,7 +14,7 @@ When upgrading from Helm chart version 2.1, please see [Upgrade the Grafana Mimi # mimir-distributed -![Version: 5.6.0-weekly.324](https://img.shields.io/badge/Version-5.6.0--weekly.324-informational?style=flat-square) ![AppVersion: r324](https://img.shields.io/badge/AppVersion-r324-informational?style=flat-square) +![Version: 5.6.0-weekly.325](https://img.shields.io/badge/Version-5.6.0--weekly.325-informational?style=flat-square) ![AppVersion: r325](https://img.shields.io/badge/AppVersion-r325-informational?style=flat-square) Grafana Mimir diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index ff223192cdd..21fb5f4bfd2 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -34,7 +34,7 @@ image: # -- Grafana Mimir container image repository. Note: for Grafana Enterprise Metrics use the value 'enterprise.image.repository' repository: grafana/mimir # -- Grafana Mimir container image tag. Note: for Grafana Enterprise Metrics use the value 'enterprise.image.tag' - tag: r324-9e94f1c + tag: r325-ed3160e # -- Container pull policy - shared between Grafana Mimir and Grafana Enterprise Metrics pullPolicy: IfNotPresent # -- Optionally specify an array of imagePullSecrets - shared between Grafana Mimir and Grafana Enterprise Metrics @@ -4037,7 +4037,7 @@ enterprise: # -- Grafana Enterprise Metrics container image repository. Note: for Grafana Mimir use the value 'image.repository' repository: grafana/enterprise-metrics # -- Grafana Enterprise Metrics container image tag. 
Note: for Grafana Mimir use the value 'image.tag' - tag: r324-e2048427 + tag: r325-624b3501 # Note: pullPolicy and optional pullSecrets are set in toplevel 'image' section, not here # In order to use Grafana Enterprise Metrics features, you will need to provide the contents of your Grafana Enterprise Metrics From cb53c900c3aaaa021747f9a421ea974d3c46936a Mon Sep 17 00:00:00 2001 From: Jack Baldry Date: Mon, 13 Jan 2025 11:47:10 +0000 Subject: [PATCH 18/18] Fix syntax (#10422) Otherwise this can be parsed as a YAML tag --- .github/workflows/deploy-pr-preview.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-pr-preview.yml b/.github/workflows/deploy-pr-preview.yml index ea6db81daa1..5ff7fb10d56 100644 --- a/.github/workflows/deploy-pr-preview.yml +++ b/.github/workflows/deploy-pr-preview.yml @@ -11,7 +11,7 @@ on: jobs: deploy-pr-preview: - if: ! github.event.pull_request.head.repo.fork + if: ${{ ! github.event.pull_request.head.repo.fork }} uses: grafana/writers-toolkit/.github/workflows/deploy-preview.yml@main with: sha: ${{ github.event.pull_request.head.sha }}
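The fix in this last patch is easier to see with a short demonstration of how YAML treats a bare `!`. The following sketch uses gopkg.in/yaml.v3 purely for illustration (it is not part of this change), and the exact behaviour inside GitHub Actions may differ, but the tag parsing is the same:

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

func main() {
    // Bare "!" is consumed as a YAML non-specific tag, so the negation
    // silently disappears from the scalar value.
    var before map[string]any
    _ = yaml.Unmarshal([]byte(`if: ! github.event.pull_request.head.repo.fork`), &before)
    fmt.Printf("%q\n", before["if"]) // "github.event.pull_request.head.repo.fork"

    // Inside ${{ }} the whole expression is a plain scalar, so the "!"
    // survives and is evaluated by GitHub Actions as logical negation.
    var after map[string]any
    _ = yaml.Unmarshal([]byte(`if: ${{ ! github.event.pull_request.head.repo.fork }}`), &after)
    fmt.Printf("%q\n", after["if"]) // "${{ ! github.event.pull_request.head.repo.fork }}"
}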