From d9d8f342b7ee5b405e3c97fb51cda75643ce7cf1 Mon Sep 17 00:00:00 2001 From: eshitachandwani <59800922+eshitachandwani@users.noreply.github.com> Date: Wed, 9 Oct 2024 13:29:31 +0530 Subject: [PATCH 01/57] revert xds: return all ServerConfig dial options together (#7712) * revert xds: return all ServerConfig dial options together * revert - xdsclient: fix test build breakage --- internal/xds/bootstrap/bootstrap.go | 22 ++++++++----- xds/internal/xdsclient/transport/transport.go | 21 +++++++----- .../xdsclient/transport/transport_test.go | 32 +++++-------------- 3 files changed, 35 insertions(+), 40 deletions(-) diff --git a/internal/xds/bootstrap/bootstrap.go b/internal/xds/bootstrap/bootstrap.go index 35aeea701a92..c725bc1eac97 100644 --- a/internal/xds/bootstrap/bootstrap.go +++ b/internal/xds/bootstrap/bootstrap.go @@ -220,14 +220,20 @@ func (sc *ServerConfig) ServerFeaturesIgnoreResourceDeletion() bool { return false } -// DialOptions returns a slice of all the configured dial options for this -// server. -func (sc *ServerConfig) DialOptions() []grpc.DialOption { - dopts := []grpc.DialOption{sc.credsDialOption} - if sc.dialerOption != nil { - dopts = append(dopts, sc.dialerOption) - } - return dopts +// CredsDialOption returns the first supported transport credentials from the +// configuration, as a dial option. +func (sc *ServerConfig) CredsDialOption() grpc.DialOption { + return sc.credsDialOption +} + +// DialerOption returns the Dialer function that specifies how to dial the xDS +// server determined by the first supported credentials from the configuration, +// as a dial option. +// +// TODO(https://github.com/grpc/grpc-go/issues/7661): change ServerConfig type +// to have a single method that returns all configured dial options. +func (sc *ServerConfig) DialerOption() grpc.DialOption { + return sc.dialerOption } // Cleanups returns a collection of functions to be called when the xDS client diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index 59b221727a1f..134a9519f19f 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -192,14 +192,19 @@ func New(opts Options) (*Transport, error) { return nil, errors.New("missing OnSend callback handler when creating a new transport") } - // Dial the xDS management server with dial options specified by the server - // configuration and a static keepalive configuration that is common across - // gRPC language implementations. - kpCfg := grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 5 * time.Minute, - Timeout: 20 * time.Second, - }) - dopts := append([]grpc.DialOption{kpCfg}, opts.ServerCfg.DialOptions()...) + // Dial the xDS management with the passed in credentials. + dopts := []grpc.DialOption{ + opts.ServerCfg.CredsDialOption(), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + // We decided to use these sane defaults in all languages, and + // kicked the can down the road as far making these configurable. + Time: 5 * time.Minute, + Timeout: 20 * time.Second, + }), + } + if dialerOpts := opts.ServerCfg.DialerOption(); dialerOpts != nil { + dopts = append(dopts, dialerOpts) + } grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) 
if err != nil { diff --git a/xds/internal/xdsclient/transport/transport_test.go b/xds/internal/xdsclient/transport/transport_test.go index b217fd7430d9..7aac0ccdbb8b 100644 --- a/xds/internal/xdsclient/transport/transport_test.go +++ b/xds/internal/xdsclient/transport/transport_test.go @@ -22,7 +22,6 @@ import ( "encoding/json" "net" "testing" - "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -44,8 +43,6 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -const defaultTestTimeout = 10 * time.Second - var noopRecvHandler = func(_ transport.ResourceUpdate, onDone func()) error { onDone() return nil @@ -111,17 +108,10 @@ const testDialerCredsBuilderName = "test_dialer_creds" // testDialerCredsBuilder implements the `Credentials` interface defined in // package `xds/bootstrap` and encapsulates an insecure credential with a // custom Dialer that specifies how to dial the xDS server. -type testDialerCredsBuilder struct { - // Closed with the custom Dialer is invoked. - // Needs to be passed in by the test. - dialCalled chan struct{} -} +type testDialerCredsBuilder struct{} func (t *testDialerCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) { - return &testDialerCredsBundle{ - Bundle: insecure.NewBundle(), - dialCalled: t.dialCalled, - }, func() {}, nil + return &testDialerCredsBundle{insecure.NewBundle()}, func() {}, nil } func (t *testDialerCredsBuilder) Name() string { @@ -133,12 +123,10 @@ func (t *testDialerCredsBuilder) Name() string { // that specifies how to dial the xDS server. type testDialerCredsBundle struct { credentials.Bundle - dialCalled chan struct{} } -func (t *testDialerCredsBundle) Dialer(_ context.Context, address string) (net.Conn, error) { - close(t.dialCalled) - return net.Dial("tcp", address) +func (t *testDialerCredsBundle) Dialer(context.Context, string) (net.Conn, error) { + return nil, nil } func (s) TestNewWithDialerFromCredentialsBundle(t *testing.T) { @@ -152,8 +140,7 @@ func (s) TestNewWithDialerFromCredentialsBundle(t *testing.T) { internal.GRPCNewClient = customGRPCNewClient defer func() { internal.GRPCNewClient = oldGRPCNewClient }() - dialCalled := make(chan struct{}) - bootstrap.RegisterCredentials(&testDialerCredsBuilder{dialCalled: dialCalled}) + bootstrap.RegisterCredentials(&testDialerCredsBuilder{}) serverCfg, err := internalbootstrap.ServerConfigForTesting(internalbootstrap.ServerConfigTestingOptions{ URI: "trafficdirector.googleapis.com:443", ChannelCreds: []internalbootstrap.ChannelCreds{{Type: testDialerCredsBuilderName}}, @@ -161,7 +148,9 @@ func (s) TestNewWithDialerFromCredentialsBundle(t *testing.T) { if err != nil { t.Fatalf("Failed to create server config for testing: %v", err) } - + if serverCfg.DialerOption() == nil { + t.Fatalf("Dialer for xDS transport in server config for testing is nil, want non-nil") + } // Create a new transport. opts := transport.Options{ ServerCfg: serverCfg, @@ -182,11 +171,6 @@ func (s) TestNewWithDialerFromCredentialsBundle(t *testing.T) { if err != nil { t.Fatalf("transport.New(%v) failed: %v", opts, err) } - select { - case <-dialCalled: - case <-time.After(defaultTestTimeout): - t.Fatal("Timeout when waiting for Dialer() to be invoked") - } // Verify there are three dial options passed to the custom grpc.NewClient. 
// The first is opts.ServerCfg.CredsDialOption(), the second is // grpc.WithKeepaliveParams(), and the third is opts.ServerCfg.DialerOption() From b8ee37db62bef105fdbce649f0ee4ee8e78b1e46 Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Wed, 9 Oct 2024 15:09:17 +0530 Subject: [PATCH 02/57] pickfirst: Move var for mocking the shuffle func from internal/internal to pickfirst/internal (#7698) --- balancer/pickfirst/internal/internal.go | 24 +++++++++ balancer/pickfirst/pickfirst.go | 5 +- .../pickfirst/pickfirst_ext_test.go | 50 +++++++++++++++---- internal/internal.go | 4 -- test/balancer_switching_test.go | 11 ++++ 5 files changed, 78 insertions(+), 16 deletions(-) create mode 100644 balancer/pickfirst/internal/internal.go rename test/pickfirst_test.go => balancer/pickfirst/pickfirst_ext_test.go (96%) diff --git a/balancer/pickfirst/internal/internal.go b/balancer/pickfirst/internal/internal.go new file mode 100644 index 000000000000..c51978945844 --- /dev/null +++ b/balancer/pickfirst/internal/internal.go @@ -0,0 +1,24 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains code internal to the pickfirst package. +package internal + +import "math/rand" + +// RandShuffle pseudo-randomizes the order of addresses. +var RandShuffle = rand.Shuffle diff --git a/balancer/pickfirst/pickfirst.go b/balancer/pickfirst/pickfirst.go index 8fadf3dfcdd7..3e792b2b366f 100644 --- a/balancer/pickfirst/pickfirst.go +++ b/balancer/pickfirst/pickfirst.go @@ -26,9 +26,9 @@ import ( "math/rand" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" @@ -37,7 +37,6 @@ import ( func init() { balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -143,7 +142,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) 
- internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each diff --git a/test/pickfirst_test.go b/balancer/pickfirst/pickfirst_ext_test.go similarity index 96% rename from test/pickfirst_test.go rename to balancer/pickfirst/pickfirst_ext_test.go index a5fc77d4632f..faa6e7dd274e 100644 --- a/test/pickfirst_test.go +++ b/balancer/pickfirst/pickfirst_ext_test.go @@ -16,7 +16,7 @@ * */ -package test +package pickfirst_test import ( "context" @@ -28,11 +28,13 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/backoff" + pfinternal "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/pickfirst" @@ -45,7 +47,38 @@ import ( testpb "google.golang.org/grpc/interop/grpc_testing" ) -const pickFirstServiceConfig = `{"loadBalancingConfig": [{"pick_first":{}}]}` +const ( + pickFirstServiceConfig = `{"loadBalancingConfig": [{"pick_first":{}}]}` + // Default timeout for tests in this package. + defaultTestTimeout = 10 * time.Second + // Default short timeout, to be used when waiting for events which are not + // expected to happen. + defaultTestShortTimeout = 100 * time.Millisecond +) + +func init() { + channelz.TurnOn() +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// parseServiceConfig is a test helper which uses the manual resolver to parse +// the given service config. It calls t.Fatal() if service config parsing fails. +func parseServiceConfig(t *testing.T, r *manual.Resolver, sc string) *serviceconfig.ParseResult { + t.Helper() + + scpr := r.CC.ParseServiceConfig(sc) + if scpr.Err != nil { + t.Fatalf("Failed to parse service config %q: %v", sc, scpr.Err) + } + return scpr +} // setupPickFirst performs steps required for pick_first tests. It starts a // bunch of backends exporting the TestService, creates a ClientConn to them @@ -377,16 +410,15 @@ func (s) TestPickFirst_ShuffleAddressList(t *testing.T) { const serviceConfig = `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}` // Install a shuffler that always reverses two entries. - origShuf := internal.ShuffleAddressListForTesting - defer func() { internal.ShuffleAddressListForTesting = origShuf }() - internal.ShuffleAddressListForTesting = func(n int, f func(int, int)) { + origShuf := pfinternal.RandShuffle + defer func() { pfinternal.RandShuffle = origShuf }() + pfinternal.RandShuffle = func(n int, f func(int, int)) { if n != 2 { t.Errorf("Shuffle called with n=%v; want 2", n) return } f(0, 1) // reverse the two addresses } - // Set up our backends. cc, r, backends := setupPickFirst(t, 2) addrs := stubBackendsToResolverAddrs(backends) @@ -434,9 +466,9 @@ func (s) TestPickFirst_ShuffleAddressList(t *testing.T) { // Test config parsing with the env var turned on and off for various scenarios. func (s) TestPickFirst_ParseConfig_Success(t *testing.T) { // Install a shuffler that always reverses two entries. 
- origShuf := internal.ShuffleAddressListForTesting - defer func() { internal.ShuffleAddressListForTesting = origShuf }() - internal.ShuffleAddressListForTesting = func(n int, f func(int, int)) { + origShuf := pfinternal.RandShuffle + defer func() { pfinternal.RandShuffle = origShuf }() + pfinternal.RandShuffle = func(n int, f func(int, int)) { if n != 2 { t.Errorf("Shuffle called with n=%v; want 2", n) return diff --git a/internal/internal.go b/internal/internal.go index 5510b2fb4616..20b4dc3d3536 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -207,10 +207,6 @@ var ( // default resolver scheme. UserSetDefaultScheme = false - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) - // ConnectedAddress returns the connected address for a SubConnState. The // address is only valid if the state is READY. ConnectedAddress any // func (scs SubConnState) resolver.Address diff --git a/test/balancer_switching_test.go b/test/balancer_switching_test.go index f0bb600f9fa6..8074b59b3a47 100644 --- a/test/balancer_switching_test.go +++ b/test/balancer_switching_test.go @@ -46,6 +46,7 @@ const ( loadBalancedServicePort = 443 wantGRPCLBTraceDesc = `Channel switches to new LB policy "grpclb"` wantRoundRobinTraceDesc = `Channel switches to new LB policy "round_robin"` + pickFirstServiceConfig = `{"loadBalancingConfig": [{"pick_first":{}}]}` // This is the number of stub backends set up at the start of each test. The // first backend is used for the "grpclb" policy and the rest are used for @@ -53,6 +54,16 @@ const ( backendCount = 3 ) +// stubBackendsToResolverAddrs converts from a set of stub server backends to +// resolver addresses. Useful when pushing addresses to the manual resolver. +func stubBackendsToResolverAddrs(backends []*stubserver.StubServer) []resolver.Address { + addrs := make([]resolver.Address, len(backends)) + for i, backend := range backends { + addrs[i] = resolver.Address{Addr: backend.Address} + } + return addrs +} + // setupBackendsAndFakeGRPCLB sets up backendCount number of stub server // backends and a fake grpclb server for tests which exercise balancer switch // scenarios involving grpclb. From 4115c218d0c527d378bd2d611394d1659f870e80 Mon Sep 17 00:00:00 2001 From: eshitachandwani <59800922+eshitachandwani@users.noreply.github.com> Date: Thu, 10 Oct 2024 03:17:49 +0530 Subject: [PATCH 03/57] xds: return all ServerConfig dial options together (#7718) --- internal/xds/bootstrap/bootstrap.go | 22 ++--- xds/internal/xdsclient/transport/transport.go | 21 ++--- .../xdsclient/transport/transport_test.go | 83 ------------------- 3 files changed, 16 insertions(+), 110 deletions(-) diff --git a/internal/xds/bootstrap/bootstrap.go b/internal/xds/bootstrap/bootstrap.go index c725bc1eac97..35aeea701a92 100644 --- a/internal/xds/bootstrap/bootstrap.go +++ b/internal/xds/bootstrap/bootstrap.go @@ -220,20 +220,14 @@ func (sc *ServerConfig) ServerFeaturesIgnoreResourceDeletion() bool { return false } -// CredsDialOption returns the first supported transport credentials from the -// configuration, as a dial option. -func (sc *ServerConfig) CredsDialOption() grpc.DialOption { - return sc.credsDialOption -} - -// DialerOption returns the Dialer function that specifies how to dial the xDS -// server determined by the first supported credentials from the configuration, -// as a dial option. 
-// -// TODO(https://github.com/grpc/grpc-go/issues/7661): change ServerConfig type -// to have a single method that returns all configured dial options. -func (sc *ServerConfig) DialerOption() grpc.DialOption { - return sc.dialerOption +// DialOptions returns a slice of all the configured dial options for this +// server. +func (sc *ServerConfig) DialOptions() []grpc.DialOption { + dopts := []grpc.DialOption{sc.credsDialOption} + if sc.dialerOption != nil { + dopts = append(dopts, sc.dialerOption) + } + return dopts } // Cleanups returns a collection of functions to be called when the xDS client diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go index 134a9519f19f..59b221727a1f 100644 --- a/xds/internal/xdsclient/transport/transport.go +++ b/xds/internal/xdsclient/transport/transport.go @@ -192,19 +192,14 @@ func New(opts Options) (*Transport, error) { return nil, errors.New("missing OnSend callback handler when creating a new transport") } - // Dial the xDS management with the passed in credentials. - dopts := []grpc.DialOption{ - opts.ServerCfg.CredsDialOption(), - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - // We decided to use these sane defaults in all languages, and - // kicked the can down the road as far making these configurable. - Time: 5 * time.Minute, - Timeout: 20 * time.Second, - }), - } - if dialerOpts := opts.ServerCfg.DialerOption(); dialerOpts != nil { - dopts = append(dopts, dialerOpts) - } + // Dial the xDS management server with dial options specified by the server + // configuration and a static keepalive configuration that is common across + // gRPC language implementations. + kpCfg := grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 5 * time.Minute, + Timeout: 20 * time.Second, + }) + dopts := append([]grpc.DialOption{kpCfg}, opts.ServerCfg.DialOptions()...) grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) if err != nil { diff --git a/xds/internal/xdsclient/transport/transport_test.go b/xds/internal/xdsclient/transport/transport_test.go index 7aac0ccdbb8b..b51f58b742f5 100644 --- a/xds/internal/xdsclient/transport/transport_test.go +++ b/xds/internal/xdsclient/transport/transport_test.go @@ -18,17 +18,11 @@ package transport_test import ( - "context" - "encoding/json" - "net" "testing" "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" internalbootstrap "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/xds/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/transport" "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" @@ -102,80 +96,3 @@ func (s) TestNewWithGRPCDial(t *testing.T) { t.Fatalf("transport.New(%+v) custom dialer called = true, want false", opts) } } - -const testDialerCredsBuilderName = "test_dialer_creds" - -// testDialerCredsBuilder implements the `Credentials` interface defined in -// package `xds/bootstrap` and encapsulates an insecure credential with a -// custom Dialer that specifies how to dial the xDS server. 
-type testDialerCredsBuilder struct{} - -func (t *testDialerCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) { - return &testDialerCredsBundle{insecure.NewBundle()}, func() {}, nil -} - -func (t *testDialerCredsBuilder) Name() string { - return testDialerCredsBuilderName -} - -// testDialerCredsBundle implements the `Bundle` interface defined in package -// `credentials` and encapsulates an insecure credential with a custom Dialer -// that specifies how to dial the xDS server. -type testDialerCredsBundle struct { - credentials.Bundle -} - -func (t *testDialerCredsBundle) Dialer(context.Context, string) (net.Conn, error) { - return nil, nil -} - -func (s) TestNewWithDialerFromCredentialsBundle(t *testing.T) { - // Override grpc.NewClient with a custom one. - doptsLen := 0 - customGRPCNewClient := func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - doptsLen = len(opts) - return grpc.NewClient(target, opts...) - } - oldGRPCNewClient := internal.GRPCNewClient - internal.GRPCNewClient = customGRPCNewClient - defer func() { internal.GRPCNewClient = oldGRPCNewClient }() - - bootstrap.RegisterCredentials(&testDialerCredsBuilder{}) - serverCfg, err := internalbootstrap.ServerConfigForTesting(internalbootstrap.ServerConfigTestingOptions{ - URI: "trafficdirector.googleapis.com:443", - ChannelCreds: []internalbootstrap.ChannelCreds{{Type: testDialerCredsBuilderName}}, - }) - if err != nil { - t.Fatalf("Failed to create server config for testing: %v", err) - } - if serverCfg.DialerOption() == nil { - t.Fatalf("Dialer for xDS transport in server config for testing is nil, want non-nil") - } - // Create a new transport. - opts := transport.Options{ - ServerCfg: serverCfg, - NodeProto: &v3corepb.Node{}, - OnRecvHandler: func(update transport.ResourceUpdate, onDone func()) error { - onDone() - return nil - }, - OnErrorHandler: func(error) {}, - OnSendHandler: func(*transport.ResourceSendInfo) {}, - } - c, err := transport.New(opts) - defer func() { - if c != nil { - c.Close() - } - }() - if err != nil { - t.Fatalf("transport.New(%v) failed: %v", opts, err) - } - // Verify there are three dial options passed to the custom grpc.NewClient. - // The first is opts.ServerCfg.CredsDialOption(), the second is - // grpc.WithKeepaliveParams(), and the third is opts.ServerCfg.DialerOption() - // from the credentials bundle. 
- if doptsLen != 3 { - t.Fatalf("transport.New(%v) custom grpc.NewClient called with %d dial options, want 3", opts, doptsLen) - } -} From fdc2ec2c84c8d21fb23ba82d2aeb95cdef8091e8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 9 Oct 2024 16:57:29 -0700 Subject: [PATCH 04/57] xdsclient: deflake TestADS_ResourcesAreRequestedAfterStreamRestart (#7720) --- .../tests/ads_stream_backoff_test.go | 7 ++-- .../tests/ads_stream_restart_test.go | 39 +++++++++++++++++-- 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/xds/internal/xdsclient/tests/ads_stream_backoff_test.go b/xds/internal/xdsclient/tests/ads_stream_backoff_test.go index c94945321ba5..fe8125048207 100644 --- a/xds/internal/xdsclient/tests/ads_stream_backoff_test.go +++ b/xds/internal/xdsclient/tests/ads_stream_backoff_test.go @@ -434,15 +434,16 @@ func (s) TestADS_ResourceRequestedBeforeStreamCreation(t *testing.T) { func waitForResourceNames(ctx context.Context, t *testing.T, namesCh chan []string, wantNames []string) error { t.Helper() - for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + var lastRequestedNames []string + for ; ; <-time.After(defaultTestShortTimeout) { select { case <-ctx.Done(): + return fmt.Errorf("timeout waiting for resources %v to be requested from the management server. Last requested resources: %v", wantNames, lastRequestedNames) case gotNames := <-namesCh: if cmp.Equal(gotNames, wantNames, cmpopts.EquateEmpty(), cmpopts.SortSlices(func(s1, s2 string) bool { return s1 < s2 })) { return nil } - t.Logf("Received resource names %v, want %v", gotNames, wantNames) + lastRequestedNames = gotNames } } - return fmt.Errorf("timeout waiting for resource to be requested from the management server") } diff --git a/xds/internal/xdsclient/tests/ads_stream_restart_test.go b/xds/internal/xdsclient/tests/ads_stream_restart_test.go index a74a62593f81..f0da932f5fd8 100644 --- a/xds/internal/xdsclient/tests/ads_stream_restart_test.go +++ b/xds/internal/xdsclient/tests/ads_stream_restart_test.go @@ -58,18 +58,24 @@ func (s) TestADS_ResourcesAreRequestedAfterStreamRestart(t *testing.T) { mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{ Listener: lis, OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + t.Logf("Received request for resources: %v of type %s", req.GetResourceNames(), req.GetTypeUrl()) + + // Drain the resource name channels before writing to them to ensure + // that the most recently requested names are made available to the + // test. switch req.GetTypeUrl() { case version.V3ClusterURL: select { - case cdsResourcesCh <- req.GetResourceNames(): + case <-cdsResourcesCh: default: } + cdsResourcesCh <- req.GetResourceNames() case version.V3ListenerURL: - t.Logf("Received LDS request for resources: %v", req.GetResourceNames()) select { - case ldsResourcesCh <- req.GetResourceNames(): + case <-ldsResourcesCh: default: } + ldsResourcesCh <- req.GetResourceNames() } return nil }, @@ -130,6 +136,17 @@ func (s) TestADS_ResourcesAreRequestedAfterStreamRestart(t *testing.T) { t.Fatal(err) } + // Verify the update received by the watcher. 
+ wantListenerUpdate := listenerUpdateErrTuple{ + update: xdsresource.ListenerUpdate{ + RouteConfigName: routeConfigName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, lw.updateCh, wantListenerUpdate); err != nil { + t.Fatal(err) + } + // Cancel the watch for the above listener resource, and verify that an LDS // request with no resource names is sent. ldsCancel() @@ -171,6 +188,11 @@ func (s) TestADS_ResourcesAreRequestedAfterStreamRestart(t *testing.T) { } defer ldsCancel() + // Verify the update received by the watcher. + if err := verifyListenerUpdate(ctx, lw.updateCh, wantListenerUpdate); err != nil { + t.Fatal(err) + } + // Create a cluster resource on the management server, in addition to the // existing listener resource. const clusterName = "cluster" @@ -192,6 +214,17 @@ func (s) TestADS_ResourcesAreRequestedAfterStreamRestart(t *testing.T) { t.Fatal(err) } + // Verify the update received by the watcher. + wantClusterUpdate := clusterUpdateErrTuple{ + update: xdsresource.ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: clusterName, + }, + } + if err := verifyClusterUpdate(ctx, cw.updateCh, wantClusterUpdate); err != nil { + t.Fatal(err) + } + // Cancel the watch for the above cluster resource, and verify that a CDS // request with no resource names is sent. cdsCancel() From 18a4eacc06189a8384ffb76b5add28c60ad9f169 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 9 Oct 2024 16:57:53 -0700 Subject: [PATCH 05/57] testutils: add couple of log statements to the restartable listener type (#7716) --- internal/testutils/restartable_listener.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/testutils/restartable_listener.go b/internal/testutils/restartable_listener.go index efe4019a08c2..1849b9cfb24e 100644 --- a/internal/testutils/restartable_listener.go +++ b/internal/testutils/restartable_listener.go @@ -81,6 +81,8 @@ func (l *RestartableListener) Addr() net.Addr { // Stop closes existing connections on the listener and prevents new connections // from being accepted. func (l *RestartableListener) Stop() { + logger.Infof("Stopping restartable listener %q", l.Addr()) + l.mu.Lock() l.stopped = true for _, conn := range l.conns { @@ -92,6 +94,8 @@ func (l *RestartableListener) Stop() { // Restart gets a previously stopped listener to start accepting connections. 
func (l *RestartableListener) Restart() { + logger.Infof("Restarting listener %q", l.Addr()) + l.mu.Lock() l.stopped = false l.mu.Unlock() From 00b9e140ce71480ee7ecc6b85317021e0fe11fbb Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Thu, 10 Oct 2024 09:33:47 +0530 Subject: [PATCH 06/57] pickfirst: New pick first policy for dualstack (#7498) --- .github/workflows/coverage.yml | 3 + .github/workflows/testing.yml | 5 + balancer/pickfirst/pickfirst.go | 6 + balancer/pickfirst/pickfirst_test.go | 132 +++ .../pickfirst/pickfirstleaf/pickfirstleaf.go | 624 ++++++++++++ .../pickfirstleaf/pickfirstleaf_ext_test.go | 957 ++++++++++++++++++ .../pickfirstleaf/pickfirstleaf_test.go | 259 +++++ balancer/rls/balancer_test.go | 3 + clientconn.go | 2 + clientconn_test.go | 27 +- internal/balancergroup/balancergroup_test.go | 4 + internal/envconfig/envconfig.go | 5 + test/balancer_switching_test.go | 3 + test/balancer_test.go | 3 + test/clientconn_state_transition_test.go | 18 + test/resolver_update_test.go | 3 + .../clustermanager/clustermanager_test.go | 5 + 17 files changed, 2048 insertions(+), 11 deletions(-) create mode 100644 balancer/pickfirst/pickfirst_test.go create mode 100644 balancer/pickfirst/pickfirstleaf/pickfirstleaf.go create mode 100644 balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go create mode 100644 balancer/pickfirst/pickfirstleaf/pickfirstleaf_test.go diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 970a48ff2cc4..ef832ed8cbf0 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -19,6 +19,9 @@ jobs: - name: Run coverage run: go test -coverprofile=coverage.out -coverpkg=./... ./... + - name: Run coverage with new pickfirst + run: GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST=true go test -coverprofile=coverage_new_pickfirst.out -coverpkg=./... ./... + - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 5a2ad60776eb..a6576a21fa15 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -70,6 +70,11 @@ jobs: - type: tests goversion: '1.21' + - type: tests + goversion: '1.22' + testflags: -race + grpcenv: 'GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST=true' + steps: # Setup the environment. - name: Setup GOARCH diff --git a/balancer/pickfirst/pickfirst.go b/balancer/pickfirst/pickfirst.go index 3e792b2b366f..e069346a7565 100644 --- a/balancer/pickfirst/pickfirst.go +++ b/balancer/pickfirst/pickfirst.go @@ -29,13 +29,19 @@ import ( "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) } diff --git a/balancer/pickfirst/pickfirst_test.go b/balancer/pickfirst/pickfirst_test.go new file mode 100644 index 000000000000..43d8b20df3e7 --- /dev/null +++ b/balancer/pickfirst/pickfirst_test.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2024 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package pickfirst + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" +) + +const ( + // Default timeout for tests in this package. + defaultTestTimeout = 10 * time.Second + // Default short timeout, to be used when waiting for events which are not + // expected to happen. + defaultTestShortTimeout = 100 * time.Millisecond +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestPickFirstLeaf_InitialResolverError sends a resolver error to the balancer +// before a valid resolver update. It verifies that the clientconn state is +// updated to TRANSIENT_FAILURE. +func (s) TestPickFirstLeaf_InitialResolverError(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewBalancerClientConn(t) + bal := pickfirstBuilder{}.Build(cc, balancer.BuildOptions{}) + defer bal.Close() + bal.ResolverError(errors.New("resolution failed: test error")) + + if err := cc.WaitForConnectivityState(ctx, connectivity.TransientFailure); err != nil { + t.Fatalf("cc.WaitForConnectivityState(%v) returned error: %v", connectivity.TransientFailure, err) + } + + // After sending a valid update, the LB policy should report CONNECTING. + ccState := balancer.ClientConnState{ + ResolverState: resolver.State{ + Endpoints: []resolver.Endpoint{ + {Addresses: []resolver.Address{{Addr: "1.1.1.1:1"}}}, + {Addresses: []resolver.Address{{Addr: "2.2.2.2:2"}}}, + }, + }, + } + if err := bal.UpdateClientConnState(ccState); err != nil { + t.Fatalf("UpdateClientConnState(%v) returned error: %v", ccState, err) + } + + if err := cc.WaitForConnectivityState(ctx, connectivity.Connecting); err != nil { + t.Fatalf("cc.WaitForConnectivityState(%v) returned error: %v", connectivity.Connecting, err) + } +} + +// TestPickFirstLeaf_ResolverErrorinTF sends a resolver error to the balancer +// before when it's attempting to connect to a SubConn TRANSIENT_FAILURE. It +// verifies that the picker is updated and the SubConn is not closed. +func (s) TestPickFirstLeaf_ResolverErrorinTF(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewBalancerClientConn(t) + bal := pickfirstBuilder{}.Build(cc, balancer.BuildOptions{}) + defer bal.Close() + + // After sending a valid update, the LB policy should report CONNECTING. 
+ ccState := balancer.ClientConnState{ + ResolverState: resolver.State{ + Endpoints: []resolver.Endpoint{ + {Addresses: []resolver.Address{{Addr: "1.1.1.1:1"}}}, + }, + }, + } + + if err := bal.UpdateClientConnState(ccState); err != nil { + t.Fatalf("UpdateClientConnState(%v) returned error: %v", ccState, err) + } + + sc1 := <-cc.NewSubConnCh + if err := cc.WaitForConnectivityState(ctx, connectivity.Connecting); err != nil { + t.Fatalf("cc.WaitForConnectivityState(%v) returned error: %v", connectivity.Connecting, err) + } + + scErr := fmt.Errorf("test error: connection refused") + sc1.UpdateState(balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: scErr, + }) + + if err := cc.WaitForPickerWithErr(ctx, scErr); err != nil { + t.Fatalf("cc.WaitForPickerWithErr(%v) returned error: %v", scErr, err) + } + + bal.ResolverError(errors.New("resolution failed: test error")) + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatalf("cc.WaitForPickerWithErr() returned error: %v", err) + } + + select { + case <-time.After(defaultTestShortTimeout): + case sc := <-cc.ShutdownSubConnCh: + t.Fatalf("Unexpected SubConn shutdown: %v", sc) + } +} diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 000000000000..48ce8c50e5c1 --- /dev/null +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,624 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirstleaf contains the pick_first load balancing policy which +// will be the universal leaf policy after dualstack changes are implemented. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package pickfirstleaf + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + if envconfig.NewPickFirstEnabled { + // Register as the default pick_first balancer. + Name = "pick_first" + } + balancer.Register(pickfirstBuilder{}) +} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + // Name is the name of the pick_first_leaf balancer. + // It is changed to "pick_first" in init() if this balancer is to be + // registered as the default pickfirst. + Name = "pick_first_leaf" +) + +// TODO: change to pick-first when this becomes the default pick_first policy. 
+const logPrefix = "[pick-first-leaf-lb %p] " + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + addressList: addressList{}, + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + mu: sync.Mutex{}, + } + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (b pickfirstBuilder) Name() string { + return Name +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. +type scData struct { + // The following fields are initialized at build time and read-only after + // that. + subConn balancer.SubConn + addr resolver.Address + + state connectivity.State + lastErr error +} + +func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + sd := &scData{ + state: connectivity.Idle, + addr: addr, + } + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(sd, state) + }, + }) + if err != nil { + return nil, err + } + sd.subConn = sc + return sd, nil +} + +type pickfirstBalancer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + + // The mutex is used to ensure synchronization of updates triggered + // from the idle picker and the already serialized resolver, + // SubConn state updates. + mu sync.Mutex + state connectivity.State + // scData for active subonns mapped by address. + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int +} + +// ResolverError is called by the ClientConn when the name resolver produces +// an error or when pickfirst determined the resolver update to be invalid. +func (b *pickfirstBalancer) ResolverError(err error) { + b.mu.Lock() + defer b.mu.Unlock() + b.resolverErrorLocked(err) +} + +func (b *pickfirstBalancer) resolverErrorLocked(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + + // The picker will not change since the balancer does not currently + // report an error. If the balancer hasn't received a single good resolver + // update yet, transition to TRANSIENT_FAILURE. 
+ if b.state != connectivity.TransientFailure && b.addressList.size() > 0 { + if b.logger.V(2) { + b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.") + } + return + } + + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + b.mu.Lock() + defer b.mu.Unlock() + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // Cleanup state pertaining to the previous resolver state. + // Treat an empty address list like an error by calling b.ResolverError. + b.state = connectivity.TransientFailure + b.closeSubConnsLocked() + b.addressList.updateAddrs(nil) + b.resolverErrorLocked(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + var newAddrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling + // will change the order of endpoints but not touch the order of the + // addresses within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for + // each of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8305 section 4." - A61 + // TODO: support the above language. + newAddrs = append(newAddrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + newAddrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + newAddrs = append([]resolver.Address{}, newAddrs...) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + } + + // If an address appears in multiple endpoints or in the same endpoint + // multiple times, we keep it only once. We will create only one SubConn + // for the address because an AddressMap is used to store SubConns. + // Not de-duplicating would result in attempting to connect to the same + // SubConn multiple times in the same pass. We don't want this. + newAddrs = deDupAddresses(newAddrs) + + // Since we have a new set of addresses, we are again at first pass. + b.firstPass = true + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. 
+ prevAddr := b.addressList.currentAddress() + prevAddrsCount := b.addressList.size() + b.addressList.updateAddrs(newAddrs) + if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { + return nil + } + + b.reconcileSubConnsLocked(newAddrs) + // If it's the first resolver update or the balancer was already READY + // (but the new address list does not contain the ready SubConn) or + // CONNECTING, enter CONNECTING. + // We may be in TRANSIENT_FAILURE due to a previous empty address list, + // we should still enter CONNECTING because the sticky TF behaviour + // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported + // due to connectivity failures. + if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { + // Start connection attempt at first address. + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.requestConnectionLocked() + } else if b.state == connectivity.TransientFailure { + // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until + // we're READY. See A62. + b.requestConnectionLocked() + } + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) Close() { + b.mu.Lock() + defer b.mu.Unlock() + b.closeSubConnsLocked() + b.state = connectivity.Shutdown +} + +// ExitIdle moves the balancer out of idle state. It can be called concurrently +// by the idlePicker and clientConn so access to variables should be +// synchronized. +func (b *pickfirstBalancer) ExitIdle() { + b.mu.Lock() + defer b.mu.Unlock() + if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { + b.firstPass = true + b.requestConnectionLocked() + } +} + +func (b *pickfirstBalancer) closeSubConnsLocked() { + for _, sd := range b.subConns.Values() { + sd.(*scData).subConn.Shutdown() + } + b.subConns = resolver.NewAddressMap() +} + +// deDupAddresses ensures that each address appears only once in the slice. +func deDupAddresses(addrs []resolver.Address) []resolver.Address { + seenAddrs := resolver.NewAddressMap() + retAddrs := []resolver.Address{} + + for _, addr := range addrs { + if _, ok := seenAddrs.Get(addr); ok { + continue + } + retAddrs = append(retAddrs, addr) + } + return retAddrs +} + +func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { + // Remove old subConns that were not in new address list. + oldAddrsMap := resolver.NewAddressMap() + for _, k := range b.subConns.Keys() { + oldAddrsMap.Set(k, true) + } + + // Flatten the new endpoint addresses. + newAddrsMap := resolver.NewAddressMap() + for _, addr := range newAddrs { + newAddrsMap.Set(addr, true) + } + + // Shut them down and remove them. + for _, oldAddr := range oldAddrsMap.Keys() { + if _, ok := newAddrsMap.Get(oldAddr); ok { + continue + } + val, _ := b.subConns.Get(oldAddr) + val.(*scData).subConn.Shutdown() + b.subConns.Delete(oldAddr) + } +} + +// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn +// becomes ready, which means that all other subConn must be shutdown. 
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.subConn != selected.subConn { + sd.subConn.Shutdown() + } + } + b.subConns = resolver.NewAddressMap() + b.subConns.Set(selected.addr, selected) +} + +// requestConnectionLocked starts connecting on the subchannel corresponding to +// the current address. If no subchannel exists, one is created. If the current +// subchannel is in TransientFailure, a connection to the next address is +// attempted until a subchannel is found. +func (b *pickfirstBalancer) requestConnectionLocked() { + if !b.addressList.isValid() { + return + } + var lastErr error + for valid := true; valid; valid = b.addressList.increment() { + curAddr := b.addressList.currentAddress() + sd, ok := b.subConns.Get(curAddr) + if !ok { + var err error + // We want to assign the new scData to sd from the outer scope, + // hence we can't use := below. + sd, err = b.newSCData(curAddr) + if err != nil { + // This should never happen, unless the clientConn is being shut + // down. + if b.logger.V(2) { + b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err) + } + // Do nothing, the LB policy will be closed soon. + return + } + b.subConns.Set(curAddr, sd) + } + + scd := sd.(*scData) + switch scd.state { + case connectivity.Idle: + scd.subConn.Connect() + case connectivity.TransientFailure: + // Try the next address. + lastErr = scd.lastErr + continue + case connectivity.Ready: + // Should never happen. + b.logger.Errorf("Requesting a connection even though we have a READY SubConn") + case connectivity.Shutdown: + // Should never happen. + b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") + case connectivity.Connecting: + // Wait for the SubConn to report success or failure. + } + return + } + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the + // first pass. + b.endFirstPassLocked(lastErr) +} + +func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + oldState := sd.state + sd.state = newState.ConnectivityState + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes this + // SubConn. + if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { + return + } + if newState.ConnectivityState == connectivity.Shutdown { + return + } + + if newState.ConnectivityState == connectivity.Ready { + b.shutdownRemainingLocked(sd) + if !b.addressList.seekTo(sd.addr) { + // This should not fail as we should have only one SubConn after + // entering READY. The SubConn should be present in the addressList. + b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) + return + } + b.state = connectivity.Ready + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + + // If the LB policy is READY, and it receives a subchannel state change, + // it means that the READY subchannel has failed. + // A SubConn can also transition from CONNECTING directly to IDLE when + // a transport is successfully created, but the connection fails + // before the SubConn can send the notification for READY. 
We treat + // this as a successful connection and transition to IDLE. + if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // Once a transport fails, the balancer enters IDLE and starts from + // the first address when the picker is used. + b.shutdownRemainingLocked(sd) + b.state = connectivity.Idle + b.addressList.reset() + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, + }) + return + } + + if b.firstPass { + switch newState.ConnectivityState { + case connectivity.Connecting: + // The balancer can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in + // TRANSIENT_FAILURE until it's READY. See A62. + // If the balancer is already in CONNECTING, no update is needed. + if b.state == connectivity.Idle { + b.state = connectivity.Connecting + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + } + case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError + // Since we're re-using common SubConns while handling resolver + // updates, we could receive an out of turn TRANSIENT_FAILURE from + // a pass over the previous address list. We ignore such updates. + + if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + return + } + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + // End of the first pass. + b.endFirstPassLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. + case connectivity.Idle: + sd.subConn.Connect() + } +} + +func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { + b.firstPass = false + b.numTF = 0 + b.state = connectivity.TransientFailure + + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.state == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + exitIdle func() +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.exitIdle() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. 
+// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. +func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +// first returns the first address in the list. If the list is empty, it returns +// an empty address instead. +func (al *addressList) first() resolver.Address { + if len(al.addresses) == 0 { + return resolver.Address{} + } + return al.addresses[0] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. +func (al *addressList) seekTo(needle resolver.Address) bool { + for ai, addr := range al.addresses { + if !equalAddressIgnoringBalAttributes(&addr, &needle) { + continue + } + al.idx = ai + return true + } + return false +} + +// equalAddressIgnoringBalAttributes returns true is a and b are considered +// equal. This is different from the Equal method on the resolver.Address type +// which considers all fields to determine equality. Here, we only consider +// fields that are meaningful to the SubConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go new file mode 100644 index 000000000000..2ab40ef1615a --- /dev/null +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go @@ -0,0 +1,957 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package pickfirstleaf_test
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/grpctest"
+	"google.golang.org/grpc/internal/stubserver"
+	"google.golang.org/grpc/internal/testutils"
+	"google.golang.org/grpc/internal/testutils/pickfirst"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/resolver/manual"
+	"google.golang.org/grpc/status"
+
+	testgrpc "google.golang.org/grpc/interop/grpc_testing"
+	testpb "google.golang.org/grpc/interop/grpc_testing"
+)
+
+const (
+	// Default timeout for tests in this package.
+	defaultTestTimeout = 10 * time.Second
+	// Default short timeout, to be used when waiting for events which are not
+	// expected to happen.
+	defaultTestShortTimeout  = 100 * time.Millisecond
+	stateStoringBalancerName = "state_storing"
+)
+
+var stateStoringServiceConfig = fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateStoringBalancerName)
+
+type s struct {
+	grpctest.Tester
+}
+
+func Test(t *testing.T) {
+	grpctest.RunSubTests(t, s{})
+}
+
+// setupPickFirstLeaf performs steps required for pick_first tests. It starts a
+// bunch of backends exporting the TestService, creates a ClientConn to them
+// with service config specifying the use of the state_storing LB policy.
+func setupPickFirstLeaf(t *testing.T, backendCount int, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, *backendManager) {
+	t.Helper()
+	r := manual.NewBuilderWithScheme("whatever")
+	backends := make([]*stubserver.StubServer, backendCount)
+	addrs := make([]resolver.Address, backendCount)
+
+	for i := 0; i < backendCount; i++ {
+		backend := stubserver.StartTestService(t, nil)
+		t.Cleanup(func() {
+			backend.Stop()
+		})
+		backends[i] = backend
+		addrs[i] = resolver.Address{Addr: backend.Address}
+	}
+
+	dopts := []grpc.DialOption{
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithResolvers(r),
+		grpc.WithDefaultServiceConfig(stateStoringServiceConfig),
+	}
+	dopts = append(dopts, opts...)
+	cc, err := grpc.NewClient(r.Scheme()+":///test.server", dopts...)
+	if err != nil {
+		t.Fatalf("grpc.NewClient() failed: %v", err)
+	}
+	t.Cleanup(func() { cc.Close() })
+
+	// At this point, the resolver has not returned any addresses to the channel.
+	// This RPC must block until the context expires.
+	sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
+	defer sCancel()
+	client := testgrpc.NewTestServiceClient(cc)
+	if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded {
+		t.Fatalf("EmptyCall() = %s, want %s", status.Code(err), codes.DeadlineExceeded)
+	}
+	return cc, r, &backendManager{backends}
+}
+
+// TestPickFirstLeaf_SimpleResolverUpdate tests the behaviour of the pick first
+// policy when given a list of addresses. The following steps are carried
+// out in order:
+//  1. A list of addresses are given through the resolver. Only one
+//     of the servers is running.
+//  2. RPCs are sent to verify they reach the running server.
+//
+// The state transitions of the ClientConn and all the subconns created are
+// verified.
+func (s) TestPickFirstLeaf_SimpleResolverUpdate_FirstServerReady(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + + cc, r, bm := setupPickFirstLeaf(t, 2) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + r.UpdateState(resolver.State{Addresses: addrs}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Ready}, + } + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_SimpleResolverUpdate_FirstServerUnReady(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + + cc, r, bm := setupPickFirstLeaf(t, 2) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + bm.stopAllExcept(1) + + r.UpdateState(resolver.State{Addresses: addrs}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_SimpleResolverUpdate_DuplicateAddrs(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + + cc, r, bm := setupPickFirstLeaf(t, 2) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + bm.stopAllExcept(1) + + // Add a duplicate entry in the addresslist + r.UpdateState(resolver.State{ + Addresses: 
append([]resolver.Address{addrs[0]}, addrs...), + }) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +// TestPickFirstLeaf_ResolverUpdates_DisjointLists tests the behaviour of the pick first +// policy when the following steps are carried out in order: +// 1. A list of addresses are given through the resolver. Only one +// of the servers is running. +// 2. RPCs are sent to verify they reach the running server. +// 3. A second resolver update is sent. Again, only one of the servers is +// running. This may not be the same server as before. +// 4. RPCs are sent to verify they reach the running server. +// +// The state transitions of the ClientConn and all the subconns created are +// verified. +func (s) TestPickFirstLeaf_ResolverUpdates_DisjointLists(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + cc, r, bm := setupPickFirstLeaf(t, 4) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + bm.backends[0].S.Stop() + bm.backends[0].S = nil + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0], addrs[1]}}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + bm.backends[2].S.Stop() + bm.backends[2].S = nil + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[2], addrs[3]}}) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[3]); err != nil { + t.Fatal(err) + } + wantSCStates = []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[2]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[3]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := 
[]connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_ResolverUpdates_ActiveBackendInUpdatedList(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + cc, r, bm := setupPickFirstLeaf(t, 3) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + bm.backends[0].S.Stop() + bm.backends[0].S = nil + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0], addrs[1]}}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + bm.backends[2].S.Stop() + bm.backends[2].S = nil + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[2], addrs[1]}}) + + // Verify that the ClientConn stays in READY. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + testutils.AwaitNoStateChange(sCtx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + wantSCStates = []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_ResolverUpdates_InActiveBackendInUpdatedList(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + cc, r, bm := setupPickFirstLeaf(t, 3) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + bm.backends[0].S.Stop() + bm.backends[0].S = nil + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0], addrs[1]}}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != 
nil { + t.Fatal(err) + } + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + bm.backends[2].S.Stop() + bm.backends[2].S = nil + if err := bm.backends[0].StartServer(); err != nil { + t.Fatalf("Failed to re-start test backend: %v", err) + } + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0], addrs[2]}}) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + wantSCStates = []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_ResolverUpdates_IdenticalLists(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + cc, r, bm := setupPickFirstLeaf(t, 2) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + bm.backends[0].S.Stop() + bm.backends[0].S = nil + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0], addrs[1]}}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0], addrs[1]}}) + + // Verify that the ClientConn stays in READY. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + testutils.AwaitNoStateChange(sCtx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + wantSCStates = []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +// TestPickFirstLeaf_StopConnectedServer tests the behaviour of the pick first +// policy when the connected server is shut down. It carries out the following +// steps in order: +// 1. A list of addresses are given through the resolver. Only one +// of the servers is running. +// 2. The running server is stopped, causing the ClientConn to enter IDLE. +// 3. A (possibly different) server is started. +// 4. RPCs are made to kick the ClientConn out of IDLE. The test verifies that +// the RPCs reach the running server. +// +// The test verifies the ClientConn state transitions. +func (s) TestPickFirstLeaf_StopConnectedServer_FirstServerRestart(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + cc, r, bm := setupPickFirstLeaf(t, 2) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + // shutdown all active backends except the target. + bm.stopAllExcept(0) + + r.UpdateState(resolver.State{Addresses: addrs}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + // Shut down the connected server. + bm.backends[0].S.Stop() + bm.backends[0].S = nil + testutils.AwaitState(ctx, t, cc, connectivity.Idle) + + // Start the new target server. 
+ if err := bm.backends[0].StartServer(); err != nil { + t.Fatalf("Failed to start server: %v", err) + } + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Idle, + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_StopConnectedServer_SecondServerRestart(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + cc, r, bm := setupPickFirstLeaf(t, 2) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + // shutdown all active backends except the target. + bm.stopAllExcept(1) + + r.UpdateState(resolver.State{Addresses: addrs}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + // Shut down the connected server. + bm.backends[1].S.Stop() + bm.backends[1].S = nil + testutils.AwaitState(ctx, t, cc, connectivity.Idle) + + // Start the new target server. 
+ if err := bm.backends[1].StartServer(); err != nil { + t.Fatalf("Failed to start server: %v", err) + } + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + + wantSCStates = []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Idle, + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_StopConnectedServer_SecondServerToFirst(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + cc, r, bm := setupPickFirstLeaf(t, 2) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + // shutdown all active backends except the target. + bm.stopAllExcept(1) + + r.UpdateState(resolver.State{Addresses: addrs}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + // Shut down the connected server. + bm.backends[1].S.Stop() + bm.backends[1].S = nil + testutils.AwaitState(ctx, t, cc, connectivity.Idle) + + // Start the new target server. 
+ if err := bm.backends[0].StartServer(); err != nil { + t.Fatalf("Failed to start server: %v", err) + } + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + wantSCStates = []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Idle, + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_StopConnectedServer_FirstServerToSecond(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + balCh := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) + cc, r, bm := setupPickFirstLeaf(t, 2) + addrs := bm.resolverAddrs() + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + // shutdown all active backends except the target. + bm.stopAllExcept(0) + + r.UpdateState(resolver.State{Addresses: addrs}) + var bal *stateStoringBalancer + select { + case bal = <-balCh: + case <-ctx.Done(): + t.Fatal("Context expired while waiting for balancer to be built") + } + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + wantSCStates := []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + // Shut down the connected server. + bm.backends[0].S.Stop() + bm.backends[0].S = nil + testutils.AwaitState(ctx, t, cc, connectivity.Idle) + + // Start the new target server. + if err := bm.backends[1].StartServer(); err != nil { + t.Fatalf("Failed to start server: %v", err) + } + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + + wantSCStates = []scState{ + {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Shutdown}, + {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, + } + + if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { + t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + } + + wantConnStateTransitions := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Idle, + connectivity.Connecting, + connectivity.Ready, + } + if diff := cmp.Diff(wantConnStateTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +// TestPickFirstLeaf_EmptyAddressList carries out the following steps in order: +// 1. Send a resolver update with one running backend. +// 2. Send an empty address list causing the balancer to enter TRANSIENT_FAILURE. +// 3. Send a resolver update with one running backend. +// The test verifies the ClientConn state transitions. 
+func (s) TestPickFirstLeaf_EmptyAddressList(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + balChan := make(chan *stateStoringBalancer, 1) + balancer.Register(&stateStoringBalancerBuilder{balancer: balChan}) + cc, r, bm := setupPickFirstLeaf(t, 1) + addrs := bm.resolverAddrs() + + stateSubscriber := &ccStateSubscriber{} + internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) + + r.UpdateState(resolver.State{Addresses: addrs}) + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + r.UpdateState(resolver.State{}) + testutils.AwaitState(ctx, t, cc, connectivity.TransientFailure) + + // The balancer should have entered transient failure. + // It should transition to CONNECTING from TRANSIENT_FAILURE as sticky TF + // only applies when the initial TF is reported due to connection failures + // and not bad resolver states. + r.UpdateState(resolver.State{Addresses: addrs}) + testutils.AwaitState(ctx, t, cc, connectivity.Ready) + + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + wantTransitions := []connectivity.State{ + // From first resolver update. + connectivity.Connecting, + connectivity.Ready, + // From second update. + connectivity.TransientFailure, + // From third update. + connectivity.Connecting, + connectivity.Ready, + } + + if diff := cmp.Diff(wantTransitions, stateSubscriber.transitions); diff != "" { + t.Errorf("ClientConn states mismatch (-want +got):\n%s", diff) + } +} + +// stateStoringBalancer stores the state of the subconns being created. +type stateStoringBalancer struct { + balancer.Balancer + mu sync.Mutex + scStates []*scState +} + +func (b *stateStoringBalancer) Close() { + b.Balancer.Close() +} + +func (b *stateStoringBalancer) ExitIdle() { + if ib, ok := b.Balancer.(balancer.ExitIdler); ok { + ib.ExitIdle() + } +} + +type stateStoringBalancerBuilder struct { + balancer chan *stateStoringBalancer +} + +func (b *stateStoringBalancerBuilder) Name() string { + return stateStoringBalancerName +} + +func (b *stateStoringBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bal := &stateStoringBalancer{} + bal.Balancer = balancer.Get(pickfirstleaf.Name).Build(&stateStoringCCWrapper{cc, bal}, opts) + b.balancer <- bal + return bal +} + +func (b *stateStoringBalancer) subConnStates() []scState { + b.mu.Lock() + defer b.mu.Unlock() + ret := []scState{} + for _, s := range b.scStates { + ret = append(ret, *s) + } + return ret +} + +func (b *stateStoringBalancer) addSCState(state *scState) { + b.mu.Lock() + b.scStates = append(b.scStates, state) + b.mu.Unlock() +} + +type stateStoringCCWrapper struct { + balancer.ClientConn + b *stateStoringBalancer +} + +func (ccw *stateStoringCCWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + oldListener := opts.StateListener + scs := &scState{ + State: connectivity.Idle, + Addrs: addrs, + } + ccw.b.addSCState(scs) + opts.StateListener = func(s balancer.SubConnState) { + ccw.b.mu.Lock() + scs.State = s.ConnectivityState + ccw.b.mu.Unlock() + oldListener(s) + } + return ccw.ClientConn.NewSubConn(addrs, opts) +} + +type scState struct { + State connectivity.State + Addrs []resolver.Address +} + +type backendManager struct { + backends []*stubserver.StubServer +} + +func 
(b *backendManager) stopAllExcept(index int) { + for idx, b := range b.backends { + if idx != index { + b.S.Stop() + b.S = nil + } + } +} + +// resolverAddrs returns a list of resolver addresses for the stub server +// backends. Useful when pushing addresses to the manual resolver. +func (b *backendManager) resolverAddrs() []resolver.Address { + addrs := make([]resolver.Address, len(b.backends)) + for i, backend := range b.backends { + addrs[i] = resolver.Address{Addr: backend.Address} + } + return addrs +} + +type ccStateSubscriber struct { + transitions []connectivity.State +} + +func (c *ccStateSubscriber) OnMessage(msg any) { + c.transitions = append(c.transitions, msg.(connectivity.State)) +} diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf_test.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_test.go new file mode 100644 index 000000000000..84b3cb65bed4 --- /dev/null +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_test.go @@ -0,0 +1,259 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package pickfirstleaf + +import ( + "context" + "fmt" + "testing" + "time" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" +) + +const ( + // Default timeout for tests in this package. + defaultTestTimeout = 10 * time.Second + // Default short timeout, to be used when waiting for events which are not + // expected to happen. + defaultTestShortTimeout = 100 * time.Millisecond +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestAddressList_Iteration verifies the behaviour of the addressList while +// iterating through the entries. 
+func (s) TestAddressList_Iteration(t *testing.T) { + addrs := []resolver.Address{ + { + Addr: "192.168.1.1", + ServerName: "test-host-1", + Attributes: attributes.New("key-1", "val-1"), + BalancerAttributes: attributes.New("bal-key-1", "bal-val-1"), + }, + { + Addr: "192.168.1.2", + ServerName: "test-host-2", + Attributes: attributes.New("key-2", "val-2"), + BalancerAttributes: attributes.New("bal-key-2", "bal-val-2"), + }, + { + Addr: "192.168.1.3", + ServerName: "test-host-3", + Attributes: attributes.New("key-3", "val-3"), + BalancerAttributes: attributes.New("bal-key-3", "bal-val-3"), + }, + } + + addressList := addressList{} + emptyAddress := resolver.Address{} + if got, want := addressList.first(), emptyAddress; got != want { + t.Fatalf("addressList.first() = %v, want %v", got, want) + } + + addressList.updateAddrs(addrs) + + if got, want := addressList.first(), addressList.currentAddress(); got != want { + t.Fatalf("addressList.first() = %v, want %v", got, want) + } + + if got, want := addressList.first(), addrs[0]; got != want { + t.Fatalf("addressList.first() = %v, want %v", got, want) + } + + for i := 0; i < len(addrs); i++ { + if got, want := addressList.isValid(), true; got != want { + t.Fatalf("addressList.isValid() = %t, want %t", got, want) + } + if got, want := addressList.currentAddress(), addrs[i]; !want.Equal(got) { + t.Errorf("addressList.currentAddress() = %v, want %v", got, want) + } + if got, want := addressList.increment(), i+1 < len(addrs); got != want { + t.Fatalf("addressList.increment() = %t, want %t", got, want) + } + } + + if got, want := addressList.isValid(), false; got != want { + t.Fatalf("addressList.isValid() = %t, want %t", got, want) + } + + // increment an invalid address list. + if got, want := addressList.increment(), false; got != want { + t.Errorf("addressList.increment() = %t, want %t", got, want) + } + + if got, want := addressList.isValid(), false; got != want { + t.Errorf("addressList.isValid() = %t, want %t", got, want) + } + + addressList.reset() + for i := 0; i < len(addrs); i++ { + if got, want := addressList.isValid(), true; got != want { + t.Fatalf("addressList.isValid() = %t, want %t", got, want) + } + if got, want := addressList.currentAddress(), addrs[i]; !want.Equal(got) { + t.Errorf("addressList.currentAddress() = %v, want %v", got, want) + } + if got, want := addressList.increment(), i+1 < len(addrs); got != want { + t.Fatalf("addressList.increment() = %t, want %t", got, want) + } + } +} + +// TestAddressList_SeekTo verifies the behaviour of addressList.seekTo. +func (s) TestAddressList_SeekTo(t *testing.T) { + addrs := []resolver.Address{ + { + Addr: "192.168.1.1", + ServerName: "test-host-1", + Attributes: attributes.New("key-1", "val-1"), + BalancerAttributes: attributes.New("bal-key-1", "bal-val-1"), + }, + { + Addr: "192.168.1.2", + ServerName: "test-host-2", + Attributes: attributes.New("key-2", "val-2"), + BalancerAttributes: attributes.New("bal-key-2", "bal-val-2"), + }, + { + Addr: "192.168.1.3", + ServerName: "test-host-3", + Attributes: attributes.New("key-3", "val-3"), + BalancerAttributes: attributes.New("bal-key-3", "bal-val-3"), + }, + } + + addressList := addressList{} + addressList.updateAddrs(addrs) + + // Try finding an address in the list. 
+	key := resolver.Address{
+		Addr:               "192.168.1.2",
+		ServerName:         "test-host-2",
+		Attributes:         attributes.New("key-2", "val-2"),
+		BalancerAttributes: attributes.New("ignored", "bal-val-2"),
+	}
+
+	if got, want := addressList.seekTo(key), true; got != want {
+		t.Errorf("addressList.seekTo(%v) = %t, want %t", key, got, want)
+	}
+
+	// It should be possible to increment once more now that the pointer has advanced.
+	if got, want := addressList.increment(), true; got != want {
+		t.Errorf("addressList.increment() = %t, want %t", got, want)
+	}
+
+	if got, want := addressList.increment(), false; got != want {
+		t.Errorf("addressList.increment() = %t, want %t", got, want)
+	}
+
+	// Seek to the key again, it is behind the pointer now.
+	if got, want := addressList.seekTo(key), true; got != want {
+		t.Errorf("addressList.seekTo(%v) = %t, want %t", key, got, want)
+	}
+
+	// Seek to a key not in the list.
+	key = resolver.Address{
+		Addr:               "192.168.1.5",
+		ServerName:         "test-host-5",
+		Attributes:         attributes.New("key-5", "val-5"),
+		BalancerAttributes: attributes.New("ignored", "bal-val-5"),
+	}
+
+	if got, want := addressList.seekTo(key), false; got != want {
+		t.Errorf("addressList.seekTo(%v) = %t, want %t", key, got, want)
+	}
+
+	// It should be possible to increment once more since the pointer has not advanced.
+	if got, want := addressList.increment(), true; got != want {
+		t.Errorf("addressList.increment() = %t, want %t", got, want)
+	}
+
+	if got, want := addressList.increment(), false; got != want {
+		t.Errorf("addressList.increment() = %t, want %t", got, want)
+	}
+}
+
+// TestPickFirstLeaf_TFPickerUpdate sends TRANSIENT_FAILURE SubConn state updates
+// for each SubConn managed by a pickfirst balancer. It verifies that the picker
+// is updated with the expected frequency.
+func (s) TestPickFirstLeaf_TFPickerUpdate(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	cc := testutils.NewBalancerClientConn(t)
+	bal := pickfirstBuilder{}.Build(cc, balancer.BuildOptions{})
+	defer bal.Close()
+	ccState := balancer.ClientConnState{
+		ResolverState: resolver.State{
+			Endpoints: []resolver.Endpoint{
+				{Addresses: []resolver.Address{{Addr: "1.1.1.1:1"}}},
+				{Addresses: []resolver.Address{{Addr: "2.2.2.2:2"}}},
+			},
+		},
+	}
+	if err := bal.UpdateClientConnState(ccState); err != nil {
+		t.Fatalf("UpdateClientConnState(%v) returned error: %v", ccState, err)
+	}
+
+	// PF should report TRANSIENT_FAILURE only once all the subconns have failed
+	// once.
+	tfErr := fmt.Errorf("test err: connection refused")
+	sc1 := <-cc.NewSubConnCh
+	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
+	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure, ConnectionError: tfErr})
+
+	if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil {
+		t.Fatalf("cc.WaitForPickerWithErr(%v) returned error: %v", balancer.ErrNoSubConnAvailable, err)
+	}
+
+	sc2 := <-cc.NewSubConnCh
+	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
+	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure, ConnectionError: tfErr})
+
+	if err := cc.WaitForPickerWithErr(ctx, tfErr); err != nil {
+		t.Fatalf("cc.WaitForPickerWithErr(%v) returned error: %v", tfErr, err)
+	}
+
+	// Subsequent TRANSIENT_FAILUREs should be reported only after seeing "# of SubConns"
+	// TRANSIENT_FAILUREs.
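+	// With the two SubConns created above, that means the picker error is
+	// refreshed only on every second TRANSIENT_FAILURE report.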
+ newTfErr := fmt.Errorf("test err: unreachable") + sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure, ConnectionError: newTfErr}) + select { + case <-time.After(defaultTestShortTimeout): + case p := <-cc.NewPickerCh: + sc, err := p.Pick(balancer.PickInfo{}) + t.Fatalf("Unexpected picker update: %v, %v", sc, err) + } + + sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure, ConnectionError: newTfErr}) + if err := cc.WaitForPickerWithErr(ctx, newTfErr); err != nil { + t.Fatalf("cc.WaitForPickerWithErr(%v) returned error: %v", newTfErr, err) + } +} diff --git a/balancer/rls/balancer_test.go b/balancer/rls/balancer_test.go index 16fa77354cde..8c77e3428950 100644 --- a/balancer/rls/balancer_test.go +++ b/balancer/rls/balancer_test.go @@ -1096,6 +1096,9 @@ func (s) TestUpdateStatePauses(t *testing.T) { Init: func(bd *stub.BalancerData) { bd.Data = balancer.Get(pickfirst.Name).Build(bd.ClientConn, bd.BuildOptions) }, + Close: func(bd *stub.BalancerData) { + bd.Data.(balancer.Balancer).Close() + }, ParseConfig: func(sc json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { cfg := &childPolicyConfig{} if err := json.Unmarshal(sc, cfg); err != nil { diff --git a/clientconn.go b/clientconn.go index b47efb33c0e9..4a408d621692 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1249,6 +1249,8 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + // TODO: #7534 - Move re-resolution requests into the pick_first LB policy + // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() if acCtx.Err() != nil { diff --git a/clientconn_test.go b/clientconn_test.go index 0cb09001da04..778fe8269e98 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/transport" @@ -418,17 +419,21 @@ func (s) TestWithTransportCredentialsTLS(t *testing.T) { // When creating a transport configured with n addresses, only calculate the // backoff once per "round" of attempts instead of once per address (n times -// per "round" of attempts). -func (s) TestDial_OneBackoffPerRetryGroup(t *testing.T) { +// per "round" of attempts) for old pickfirst and once per address for new pickfirst. +func (s) TestDial_BackoffCountPerRetryGroup(t *testing.T) { var attempts uint32 + wantBackoffs := uint32(1) + if envconfig.NewPickFirstEnabled { + wantBackoffs = 2 + } getMinConnectTimeout := func() time.Duration { - if atomic.AddUint32(&attempts, 1) == 1 { + if atomic.AddUint32(&attempts, 1) <= wantBackoffs { // Once all addresses are exhausted, hang around and wait for the // client.Close to happen rather than re-starting a new round of // attempts. 
 			return time.Hour
 		}
-		t.Error("only one attempt backoff calculation, but got more")
+		t.Errorf("expected %d backoff calculations, but got more", wantBackoffs)
 		return 0
 	}
 
@@ -499,6 +504,10 @@ func (s) TestDial_OneBackoffPerRetryGroup(t *testing.T) {
 		t.Fatal("timed out waiting for test to finish")
 	case <-server2Done:
 	}
+
+	if got, want := atomic.LoadUint32(&attempts), wantBackoffs; got != want {
+		t.Errorf("attempts = %d, want %d", got, want)
+	}
 }
 
 func (s) TestDialContextCancel(t *testing.T) {
@@ -1062,18 +1071,14 @@ func (s) TestUpdateAddresses_NoopIfCalledWithSameAddresses(t *testing.T) {
 	}
 
 	// Grab the addrConn and call tryUpdateAddrs.
-	var ac *addrConn
 	client.mu.Lock()
 	for clientAC := range client.conns {
-		ac = clientAC
-		break
+		// Call UpdateAddresses with the same list of addresses, it should be a noop
+		// (even when the SubConn is Connecting, and doesn't have a curAddr).
+		clientAC.acbw.UpdateAddresses(clientAC.addrs)
 	}
 	client.mu.Unlock()
 
-	// Call UpdateAddresses with the same list of addresses, it should be a noop
-	// (even when the SubConn is Connecting, and doesn't have a curAddr).
-	ac.acbw.UpdateAddresses(addrsList)
-
 	// We've called tryUpdateAddrs - now let's make server2 close the
 	// connection and check that it continues to server3.
 	close(closeServer2)
diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go
index 8d22c9ac587e..c154c029d8f2 100644
--- a/internal/balancergroup/balancergroup_test.go
+++ b/internal/balancergroup/balancergroup_test.go
@@ -575,6 +575,7 @@ func (s) TestBalancerGracefulSwitch(t *testing.T) {
 
 	bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}})
 	bg.Start()
+	defer bg.Close()
 
 	m1 := make(map[resolver.Address]balancer.SubConn)
 	scs := make(map[balancer.SubConn]bool)
@@ -604,6 +605,9 @@ func (s) TestBalancerGracefulSwitch(t *testing.T) {
 		Init: func(bd *stub.BalancerData) {
 			bd.Data = balancer.Get(pickfirst.Name).Build(bd.ClientConn, bd.BuildOptions)
 		},
+		Close: func(bd *stub.BalancerData) {
+			bd.Data.(balancer.Balancer).Close()
+		},
 		UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error {
 			ccs.ResolverState.Addresses = ccs.ResolverState.Addresses[1:]
 			bal := bd.Data.(balancer.Balancer)
diff --git a/internal/envconfig/envconfig.go b/internal/envconfig/envconfig.go
index 452985f8d8f1..6e7dd6b77270 100644
--- a/internal/envconfig/envconfig.go
+++ b/internal/envconfig/envconfig.go
@@ -50,6 +50,11 @@ var (
 	// xDS fallback is turned on. If this is unset or is false, only the first
 	// xDS server in the list of server configs will be used.
 	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+	// instead of the existing pickfirst implementation. This can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+	// to "true".
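+	//
+	// For example, a client process could opt in by exporting
+	// GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST=true before start-up; the value
+	// is read once when this package is initialized.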
+ NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/test/balancer_switching_test.go b/test/balancer_switching_test.go index 8074b59b3a47..e5da19d30d0f 100644 --- a/test/balancer_switching_test.go +++ b/test/balancer_switching_test.go @@ -483,6 +483,9 @@ func (s) TestBalancerSwitch_Graceful(t *testing.T) { pf := balancer.Get(pickfirst.Name) bd.Data = pf.Build(bd.ClientConn, bd.BuildOptions) }, + Close: func(bd *stub.BalancerData) { + bd.Data.(balancer.Balancer).Close() + }, UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { bal := bd.Data.(balancer.Balancer) close(ccUpdateCh) diff --git a/test/balancer_test.go b/test/balancer_test.go index f27ec4d3fe90..c2405808f2ea 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -850,6 +850,9 @@ func (s) TestMetadataInPickResult(t *testing.T) { cc := &testCCWrapper{ClientConn: bd.ClientConn} bd.Data = balancer.Get(pickfirst.Name).Build(cc, bd.BuildOptions) }, + Close: func(bd *stub.BalancerData) { + bd.Data.(balancer.Balancer).Close() + }, UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { bal := bd.Data.(balancer.Balancer) return bal.UpdateClientConnState(ccs) diff --git a/test/clientconn_state_transition_test.go b/test/clientconn_state_transition_test.go index 6e9bfb37289d..56ebafaa9308 100644 --- a/test/clientconn_state_transition_test.go +++ b/test/clientconn_state_transition_test.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" @@ -323,6 +324,13 @@ func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) client, err := grpc.Dial("whatever:///this-gets-overwritten", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), + grpc.WithConnectParams(grpc.ConnectParams{ + // Set a really long back-off delay to ensure the first subConn does + // not enter IDLE before the second subConn connects. + Backoff: backoff.Config{ + BaseDelay: 1 * time.Hour, + }, + }), grpc.WithResolvers(rb)) if err != nil { t.Fatal(err) @@ -334,6 +342,16 @@ func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) connectivity.Connecting, connectivity.Ready, } + if envconfig.NewPickFirstEnabled { + want = []connectivity.State{ + // The first subconn fails. + connectivity.Connecting, + connectivity.TransientFailure, + // The second subconn connects. 
+ connectivity.Connecting, + connectivity.Ready, + } + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() for i := 0; i < len(want); i++ { diff --git a/test/resolver_update_test.go b/test/resolver_update_test.go index a7526b9d43c5..619979b9b045 100644 --- a/test/resolver_update_test.go +++ b/test/resolver_update_test.go @@ -162,6 +162,9 @@ func (s) TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate(t *testing.T) { pf := balancer.Get(pickfirst.Name) bd.Data = pf.Build(bd.ClientConn, bd.BuildOptions) }, + Close: func(bd *stub.BalancerData) { + bd.Data.(balancer.Balancer).Close() + }, ParseConfig: func(lbCfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { cfg := &wrappingBalancerConfig{} if err := json.Unmarshal(lbCfg, cfg); err != nil { diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index 079214651871..b606cb9e5e34 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -607,6 +607,7 @@ func TestClusterGracefulSwitch(t *testing.T) { builder := balancer.Get(balancerName) parser := builder.(balancer.ConfigParser) bal := builder.Build(cc, balancer.BuildOptions{}) + defer bal.Close() configJSON1 := `{ "children": { @@ -644,6 +645,9 @@ func TestClusterGracefulSwitch(t *testing.T) { Init: func(bd *stub.BalancerData) { bd.Data = balancer.Get(pickfirst.Name).Build(bd.ClientConn, bd.BuildOptions) }, + Close: func(bd *stub.BalancerData) { + bd.Data.(balancer.Balancer).Close() + }, UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { bal := bd.Data.(balancer.Balancer) return bal.UpdateClientConnState(ccs) @@ -730,6 +734,7 @@ func (s) TestUpdateStatePauses(t *testing.T) { builder := balancer.Get(balancerName) parser := builder.(balancer.ConfigParser) bal := builder.Build(cc, balancer.BuildOptions{}) + defer bal.Close() configJSON1 := `{ "children": { From b850ea533f746ce3aa7bcc56b041f27e7204de8c Mon Sep 17 00:00:00 2001 From: eshitachandwani <59800922+eshitachandwani@users.noreply.github.com> Date: Thu, 10 Oct 2024 15:34:25 +0530 Subject: [PATCH 07/57] transport : wait for goroutines to exit before transport closes (#7666) --- clientconn.go | 9 ++- internal/transport/http2_client.go | 48 +++++++++++----- internal/transport/keepalive_test.go | 1 + internal/transport/transport_test.go | 83 ++++++++++++++++++++++++++++ 4 files changed, 124 insertions(+), 17 deletions(-) diff --git a/clientconn.go b/clientconn.go index 4a408d621692..19763f8eddfa 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1140,10 +1140,15 @@ func (cc *ClientConn) Close() error { <-cc.resolverWrapper.serializer.Done() <-cc.balancerWrapper.serializer.Done() - + var wg sync.WaitGroup for ac := range conns { - ac.tearDown(ErrClientConnClosing) + wg.Add(1) + go func(ac *addrConn) { + defer wg.Done() + ac.tearDown(ErrClientConnClosing) + }(ac) } + wg.Wait() cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index ba42e51129ed..62b81885d8ef 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -86,9 +86,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. 
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held. @@ -335,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, @@ -1029,6 +1030,12 @@ func (t *http2Client) Close(err error) { } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. + <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) var st *status.Status if len(goAwayDebugMessage) > 0 { @@ -1316,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1332,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first @@ -1350,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1375,8 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } streamsToClose := make([]*Stream, 0) @@ -1393,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1628,7 +1633,13 @@ func (t *http2Client) readServerPreface() error { // network connection. 
If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1669,7 +1680,7 @@ func (t *http2Client) reader(errCh chan<- error) { continue } // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + errClose = connectionErrorf(true, err, "error reading from server: %v", err) return } switch frame := frame.(type) { @@ -1684,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1697,6 +1708,13 @@ func (t *http2Client) reader(errCh chan<- error) { // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false @@ -1720,7 +1738,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index 393a4540396f..ad377e6b241b 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -44,6 +44,7 @@ import ( ) const defaultTestTimeout = 10 * time.Second +const defaultTestShortTimeout = 10 * time.Millisecond // TestMaxConnectionIdle tests that a server will send GoAway to an idle // client. An idle client is one who doesn't make any RPC calls for a duration diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 65efb30c4bb6..4752c785b59d 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -2781,6 +2781,89 @@ func (s) TestClientSendsAGoAwayFrame(t *testing.T) { } } +// readHangingConn is a wrapper around net.Conn that makes the Read() hang when +// Close() is called. +type readHangingConn struct { + net.Conn + readHangConn chan struct{} // Read() hangs until this channel is closed by Close(). + closed *atomic.Bool // Set to true when Close() is called. +} + +func (hc *readHangingConn) Read(b []byte) (n int, err error) { + n, err = hc.Conn.Read(b) + if hc.closed.Load() { + <-hc.readHangConn // hang the read till we want + } + return n, err +} + +func (hc *readHangingConn) Close() error { + hc.closed.Store(true) + return hc.Conn.Close() +} + +// Tests that closing a client transport does not return until the reader +// goroutine exits. 
+func (s) TestClientCloseReturnsAfterReaderCompletes(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + server := setUpServerOnly(t, 0, &ServerConfig{}, normal) + defer server.stop() + addr := resolver.Address{Addr: "localhost:" + server.port} + + isReaderHanging := &atomic.Bool{} + readHangConn := make(chan struct{}) + copts := ConnectOptions{ + Dialer: func(_ context.Context, addr string) (net.Conn, error) { + conn, err := net.Dial("tcp", addr) + if err != nil { + return nil, err + } + return &readHangingConn{Conn: conn, readHangConn: readHangConn, closed: isReaderHanging}, nil + }, + ChannelzParent: channelzSubChannel(t), + } + + // Create a client transport with a custom dialer that hangs the Read() + // after Close(). + ct, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}) + if err != nil { + t.Fatalf("Failed to create transport: %v", err) + } + + if _, err := ct.NewStream(ctx, &CallHdr{}); err != nil { + t.Fatalf("Failed to open stream: %v", err) + } + + // Closing the client transport will result in the underlying net.Conn being + // closed, which will result in readHangingConn.Read() to hang. This will + // stall the exit of the reader goroutine, and will stall client + // transport's Close from returning. + transportClosed := make(chan struct{}) + go func() { + ct.Close(errors.New("manually closed by client")) + close(transportClosed) + }() + + // Wait for a short duration and ensure that the client transport's Close() + // does not return. + select { + case <-transportClosed: + t.Fatal("Transport closed before reader completed") + case <-time.After(defaultTestShortTimeout): + } + + // Closing the channel will unblock the reader goroutine and will ensure + // that the client transport's Close() returns. + close(readHangConn) + select { + case <-transportClosed: + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout when waiting for transport to close") + } +} + // hangingConn is a net.Conn wrapper for testing, simulating hanging connections // after a GOAWAY frame is sent, of which Write operations pause until explicitly // signaled or a timeout occurs. From ad81c20503be8c36d929741078e1a53a292e4048 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 14 Oct 2024 07:57:45 -0700 Subject: [PATCH 08/57] pickfirstleaf: minor simplification to reconcileSubConnsLocked method (#7731) --- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 48ce8c50e5c1..985b6edc7f4c 100644 --- a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -314,21 +314,22 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { return retAddrs } +// reconcileSubConnsLocked updates the active subchannels based on a new address +// list from the resolver. It does this by: +// - closing subchannels: any existing subchannels associated with addresses +// that are no longer in the updated list are shut down. +// - removing subchannels: entries for these closed subchannels are removed +// from the subchannel map. +// +// This ensures that the subchannel map accurately reflects the current set of +// addresses received from the name resolver. 
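+//
+// In set terms (for illustration): with O being the current keys of b.subConns
+// and N the addresses in newAddrs, every subchannel whose address is in O but
+// not in N is shut down and removed; addresses present in both sets are left
+// untouched.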
func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - // Remove old subConns that were not in new address list. - oldAddrsMap := resolver.NewAddressMap() - for _, k := range b.subConns.Keys() { - oldAddrsMap.Set(k, true) - } - - // Flatten the new endpoint addresses. newAddrsMap := resolver.NewAddressMap() for _, addr := range newAddrs { newAddrsMap.Set(addr, true) } - // Shut them down and remove them. - for _, oldAddr := range oldAddrsMap.Keys() { + for _, oldAddr := range b.subConns.Keys() { if _, ok := newAddrsMap.Get(oldAddr); ok { continue } From 54841eff8c107067337510cda5b304df1f1bf1bc Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 15 Oct 2024 13:51:45 -0400 Subject: [PATCH 09/57] stats/opentelemetry/csm: Get mesh_id local label from "CSM_MESH_ID" environment variable, rather than parsing from bootstrap file (#7740) --- stats/opentelemetry/csm/observability_test.go | 22 +++---- stats/opentelemetry/csm/pluginoption.go | 37 +----------- stats/opentelemetry/csm/pluginoption_test.go | 57 ++----------------- 3 files changed, 18 insertions(+), 98 deletions(-) diff --git a/stats/opentelemetry/csm/observability_test.go b/stats/opentelemetry/csm/observability_test.go index 7b498cb50879..520d353a6707 100644 --- a/stats/opentelemetry/csm/observability_test.go +++ b/stats/opentelemetry/csm/observability_test.go @@ -32,8 +32,6 @@ import ( "google.golang.org/grpc/encoding/gzip" istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/stubserver" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/testutils/xds/e2e" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/metadata" @@ -46,12 +44,11 @@ import ( // Env Vars as well, and mocks the resource detector's returned attribute set to // simulate the environment. It registers a cleanup function on the provided t // to restore the environment to its original state. 
-func setupEnv(t *testing.T, resourceDetectorEmissions map[string]string, nodeID, csmCanonicalServiceName, csmWorkloadName string) { - bootstrapContents := e2e.DefaultBootstrapContents(t, nodeID, "xds_server_uri") - testutils.CreateBootstrapFileForTesting(t, bootstrapContents) - +func setupEnv(t *testing.T, resourceDetectorEmissions map[string]string, meshID, csmCanonicalServiceName, csmWorkloadName string) { + oldCSMMeshID, csmMeshIDPresent := os.LookupEnv("CSM_MESH_ID") oldCSMCanonicalServiceName, csmCanonicalServiceNamePresent := os.LookupEnv("CSM_CANONICAL_SERVICE_NAME") oldCSMWorkloadName, csmWorkloadNamePresent := os.LookupEnv("CSM_WORKLOAD_NAME") + os.Setenv("CSM_MESH_ID", meshID) os.Setenv("CSM_CANONICAL_SERVICE_NAME", csmCanonicalServiceName) os.Setenv("CSM_WORKLOAD_NAME", csmWorkloadName) @@ -67,6 +64,11 @@ func setupEnv(t *testing.T, resourceDetectorEmissions map[string]string, nodeID, return &attrSet } t.Cleanup(func() { + if csmMeshIDPresent { + os.Setenv("CSM_MESH_ID", oldCSMMeshID) + } else { + os.Unsetenv("CSM_MESH_ID") + } if csmCanonicalServiceNamePresent { os.Setenv("CSM_CANONICAL_SERVICE_NAME", oldCSMCanonicalServiceName) } else { @@ -99,10 +101,10 @@ func (s) TestCSMPluginOptionUnary(t *testing.T) { "k8s.namespace.name": "k8s_namespace_name_val", "k8s.cluster.name": "k8s_cluster_name_val", } - const nodeID = "projects/12345/networks/mesh:mesh_id/nodes/aaaa-aaaa-aaaa-aaaa" + const meshID = "mesh_id" const csmCanonicalServiceName = "csm_canonical_service_name" const csmWorkloadName = "csm_workload_name" - setupEnv(t, resourceDetectorEmissions, nodeID, csmCanonicalServiceName, csmWorkloadName) + setupEnv(t, resourceDetectorEmissions, meshID, csmCanonicalServiceName, csmWorkloadName) attributesWant := map[string]string{ "csm.workload_canonical_service": csmCanonicalServiceName, // from env @@ -266,10 +268,10 @@ func (s) TestCSMPluginOptionStreaming(t *testing.T) { "k8s.namespace.name": "k8s_namespace_name_val", "k8s.cluster.name": "k8s_cluster_name_val", } - const nodeID = "projects/12345/networks/mesh:mesh_id/nodes/aaaa-aaaa-aaaa-aaaa" + const meshID = "mesh_id" const csmCanonicalServiceName = "csm_canonical_service_name" const csmWorkloadName = "csm_workload_name" - setupEnv(t, resourceDetectorEmissions, nodeID, csmCanonicalServiceName, csmWorkloadName) + setupEnv(t, resourceDetectorEmissions, meshID, csmCanonicalServiceName, csmWorkloadName) attributesWant := map[string]string{ "csm.workload_canonical_service": csmCanonicalServiceName, // from env diff --git a/stats/opentelemetry/csm/pluginoption.go b/stats/opentelemetry/csm/pluginoption.go index e415f2f53588..62a8d3813028 100644 --- a/stats/opentelemetry/csm/pluginoption.go +++ b/stats/opentelemetry/csm/pluginoption.go @@ -24,13 +24,10 @@ import ( "encoding/base64" "net/url" "os" - "strings" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats/opentelemetry/internal" - "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/structpb" @@ -233,24 +230,6 @@ func constructMetadataFromEnv(ctx context.Context) (map[string]string, string) { return initializeLocalAndMetadataLabels(labels) } -// parseMeshIDString parses the mesh id from the node id according to the format -// "projects/[GCP Project number]/networks/mesh:[Mesh ID]/nodes/[UUID]". Returns -// "unknown" if there is a syntax error in the node ID. 
-func parseMeshIDFromNodeID(nodeID string) string { - meshSplit := strings.Split(nodeID, "/") - if len(meshSplit) != 6 { - return "unknown" - } - if meshSplit[0] != "projects" || meshSplit[2] != "networks" || meshSplit[4] != "nodes" { - return "unknown" - } - meshID, ok := strings.CutPrefix(meshSplit[3], "mesh:") - if !ok { // errors become "unknown" - return "unknown" - } - return meshID -} - // initializeLocalAndMetadataLabels csm local labels for a CSM Plugin Option to // record. It also builds out a base 64 encoded protobuf.Struct containing the // metadata exchange labels to be sent as part of metadata exchange from a CSM @@ -261,9 +240,7 @@ func initializeLocalAndMetadataLabels(labels map[string]string) (map[string]stri val := labels["canonical_service"] localLabels := make(map[string]string) localLabels["csm.workload_canonical_service"] = val - // Get the CSM Mesh ID from the bootstrap file. - nodeID := getNodeID() - localLabels["csm.mesh_id"] = parseMeshIDFromNodeID(nodeID) + localLabels["csm.mesh_id"] = getEnv("CSM_MESH_ID") // Metadata exchange labels - can go ahead and encode into proto, and then // base64. @@ -288,18 +265,6 @@ func initializeLocalAndMetadataLabels(labels map[string]string) (map[string]stri return localLabels, metadataExchangeLabelsEncoded } -// getNodeID gets the Node ID from the bootstrap data. -func getNodeID() string { - cfg, err := bootstrap.GetConfiguration() - if err != nil { - return "" // will become "unknown" - } - if cfg.Node() == nil { - return "" - } - return cfg.Node().GetId() -} - // metadataExchangeKey is the key for HTTP metadata exchange. const metadataExchangeKey = "x-envoy-peer-metadata" diff --git a/stats/opentelemetry/csm/pluginoption_test.go b/stats/opentelemetry/csm/pluginoption_test.go index 8588c545360c..55e959760918 100644 --- a/stats/opentelemetry/csm/pluginoption_test.go +++ b/stats/opentelemetry/csm/pluginoption_test.go @@ -29,8 +29,6 @@ import ( "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/metadata" "github.com/google/go-cmp/cmp" @@ -304,51 +302,6 @@ func (s) TestDetermineTargetCSM(t *testing.T) { } } -func (s) TestBootstrap(t *testing.T) { - tests := []struct { - name string - nodeID string - meshIDWant string - }{ - { - name: "malformed-node-id-unknown", - nodeID: "malformed", - meshIDWant: "unknown", - }, - { - name: "node-id-parsed", - nodeID: "projects/12345/networks/mesh:mesh_id/nodes/aaaa-aaaa-aaaa-aaaa", - meshIDWant: "mesh_id", - }, - { - name: "wrong-syntax-unknown", - nodeID: "wrong-syntax/12345/networks/mesh:mesh_id/nodes/aaaa-aaaa-aaaa-aaaa", - meshIDWant: "unknown", - }, - { - name: "node-id-parsed", - nodeID: "projects/12345/networks/mesh:/nodes/aaaa-aaaa-aaaa-aaaa", - meshIDWant: "", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - bootstrapContents := e2e.DefaultBootstrapContents(t, test.nodeID, "xds_server_uri") - testutils.CreateBootstrapFileForTesting(t, bootstrapContents) - nodeIDGot := getNodeID() // this should return the node ID plumbed into bootstrap above - if nodeIDGot != test.nodeID { - t.Fatalf("getNodeID: got %v, want %v", nodeIDGot, test.nodeID) - } - - meshIDGot := parseMeshIDFromNodeID(nodeIDGot) - if meshIDGot != test.meshIDWant { - t.Fatalf("parseMeshIDFromNodeID(%v): got %v, want %v", nodeIDGot, meshIDGot, test.meshIDWant) - } - }) - } -} - // TestSetLabels tests the setting of labels, which 
snapshots the resource and // environment. It mocks the resource and environment, and then calls into // labels creation. It verifies to local labels created and metadata exchange @@ -360,14 +313,14 @@ func (s) TestSetLabels(t *testing.T) { resourceKeyValues map[string]string csmCanonicalServiceNamePopulated bool csmWorkloadNamePopulated bool - bootstrapGeneratorPopulated bool + meshIDPopulated bool localLabelsWant map[string]string metadataExchangeLabelsWant map[string]string }{ { name: "no-type", csmCanonicalServiceNamePopulated: true, - bootstrapGeneratorPopulated: true, + meshIDPopulated: true, resourceKeyValues: map[string]string{}, localLabelsWant: map[string]string{ "csm.workload_canonical_service": "canonical_service_name_val", // env var populated so should be set. @@ -480,9 +433,9 @@ func (s) TestSetLabels(t *testing.T) { os.Setenv("CSM_WORKLOAD_NAME", "workload_name_val") defer os.Unsetenv("CSM_WORKLOAD_NAME") } - if test.bootstrapGeneratorPopulated { - bootstrapContents := e2e.DefaultBootstrapContents(t, "projects/12345/networks/mesh:mesh_id/nodes/aaaa-aaaa-aaaa-aaaa", "xds_server_uri") - testutils.CreateBootstrapFileForTesting(t, bootstrapContents) + if test.meshIDPopulated { + os.Setenv("CSM_MESH_ID", "mesh_id") + defer os.Unsetenv("CSM_MESH_ID") } var attributes []attribute.KeyValue for k, v := range test.resourceKeyValues { From 4544b8a4cfe9bb4882ec3591631b83dac7434805 Mon Sep 17 00:00:00 2001 From: Purnesh Dixit Date: Wed, 16 Oct 2024 11:14:01 +0530 Subject: [PATCH 10/57] Change version to 1.69.0-dev (#7746) --- version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version.go b/version.go index d690c2b27dec..a5b038829dfb 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.68.0-dev" +const Version = "1.69.0-dev" From 569c8eb0af3277c4ed1d0dfae580d299431de64d Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Wed, 16 Oct 2024 23:00:57 +0530 Subject: [PATCH 11/57] vet: Use go1.22 instead of go1.21 for tidy and staticcheck(#7747) --- scripts/vet.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/vet.sh b/scripts/vet.sh index 3b8943b0fa65..aba59a5995bc 100755 --- a/scripts/vet.sh +++ b/scripts/vet.sh @@ -97,13 +97,13 @@ for MOD_FILE in $(find . -name 'go.mod'); do gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - go mod tidy -compat=1.21 + go mod tidy -compat=1.22 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) # - Collection of static analysis checks SC_OUT="$(mktemp)" - staticcheck -go 1.21 -checks 'all' ./... >"${SC_OUT}" || true + staticcheck -go 1.22 -checks 'all' ./... >"${SC_OUT}" || true # Error for anything other than checks that need exclusions. 
noret_grep -v "(ST1000)" "${SC_OUT}" | noret_grep -v "(SA1019)" | noret_grep -v "(ST1003)" | noret_grep -v "(ST1019)\|\(other import of\)" | not grep -v "(SA4000)" From 6cd00c93260b1589ab6734b4ea5fc5b632b9a5b1 Mon Sep 17 00:00:00 2001 From: luxcgo Date: Thu, 17 Oct 2024 03:51:15 +0800 Subject: [PATCH 12/57] clientconn: remove redundant check (#7700) --- clientconn.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/clientconn.go b/clientconn.go index 19763f8eddfa..188109bb72ef 100644 --- a/clientconn.go +++ b/clientconn.go @@ -775,10 +775,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig - } + balCfg := cc.sc.lbConfig bw := cc.balancerWrapper cc.mu.Unlock() From ec10e73f02a38fcf1e4f6e3cdee5f73c89bfa4ac Mon Sep 17 00:00:00 2001 From: luxcgo Date: Thu, 17 Oct 2024 04:09:35 +0800 Subject: [PATCH 13/57] transport: refactor `trInFlow.onData` to eliminate redundant logic (#7734) --- internal/transport/flowcontrol.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/internal/transport/flowcontrol.go b/internal/transport/flowcontrol.go index 97198c515889..dfc0f224ec87 100644 --- a/internal/transport/flowcontrol.go +++ b/internal/transport/flowcontrol.go @@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 { func (f *trInFlow) onData(n uint32) uint32 { f.unacked += n - if f.unacked >= f.limit/4 { - w := f.unacked - f.unacked = 0 + if f.unacked < f.limit/4 { f.updateEffectiveWindowSize() - return w + return 0 } - f.updateEffectiveWindowSize() - return 0 + return f.reset() } func (f *trInFlow) reset() uint32 { From d2ded4bcaa267dbc6c44f1480ea66b9ff38d06b1 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 16 Oct 2024 20:40:33 -0700 Subject: [PATCH 14/57] xdsclient: new Transport interface and LRS stream implementation (#7717) --- xds/internal/xdsclient/internal/internal.go | 5 +- .../transport/grpctransport/grpctransport.go | 138 ++++++++ .../grpctransport/grpctransport_ext_test.go | 91 +++++ .../xdsclient/transport/lrs/lrs_stream.go | 333 ++++++++++++++++++ .../transport/transport_interface.go | 67 ++++ 5 files changed, 633 insertions(+), 1 deletion(-) create mode 100644 xds/internal/xdsclient/transport/grpctransport/grpctransport.go create mode 100644 xds/internal/xdsclient/transport/grpctransport/grpctransport_ext_test.go create mode 100644 xds/internal/xdsclient/transport/lrs/lrs_stream.go create mode 100644 xds/internal/xdsclient/transport/transport_interface.go diff --git a/xds/internal/xdsclient/internal/internal.go b/xds/internal/xdsclient/internal/internal.go index e12610744109..b66697206c08 100644 --- a/xds/internal/xdsclient/internal/internal.go +++ b/xds/internal/xdsclient/internal/internal.go @@ -20,6 +20,9 @@ package internal // The following vars can be overridden by tests. var ( - // NewADSStream is a function that returns a new ADS stream. + // GRPCNewClient returns a new gRPC Client. + GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error) + + // NewADSStream returns a new ADS stream. 
NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) ) diff --git a/xds/internal/xdsclient/transport/grpctransport/grpctransport.go b/xds/internal/xdsclient/transport/grpctransport/grpctransport.go new file mode 100644 index 000000000000..9bb8c737ffcd --- /dev/null +++ b/xds/internal/xdsclient/transport/grpctransport/grpctransport.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package grpctransport provides an implementation of the transport interface +// using gRPC. +package grpctransport + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/xds/internal/xdsclient/internal" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + + v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3adspb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" +) + +func init() { + internal.GRPCNewClient = grpc.NewClient + internal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) { + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) + } +} + +// Builder provides a way to build a gRPC-based transport to an xDS server. +type Builder struct{} + +// Build creates a new gRPC-based transport to an xDS server using the provided +// options. This involves creating a grpc.ClientConn to the server identified by +// the server URI in the provided options. +func (b *Builder) Build(opts transport.BuildOptions) (transport.Interface, error) { + if opts.ServerConfig == nil { + return nil, fmt.Errorf("ServerConfig field in opts cannot be nil") + } + + // NOTE: The bootstrap package ensures that the server_uri and credentials + // inside the server config are always populated. If we end up using a + // different type in BuildOptions to specify the server configuration, we + // must ensure that those fields are not empty before proceeding. + + // Dial the xDS management server with dial options specified by the server + // configuration and a static keepalive configuration that is common across + // gRPC language implementations. + kpCfg := grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 5 * time.Minute, + Timeout: 20 * time.Second, + }) + dopts := append(opts.ServerConfig.DialOptions(), kpCfg) + dialer := internal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) + cc, err := dialer(opts.ServerConfig.ServerURI(), dopts...) + if err != nil { + // An error from a non-blocking dial indicates something serious. 
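+		// grpc.NewClient does not attempt to establish a connection, so a
+		// failure here points to invalid configuration (for example, a target
+		// URI that cannot be parsed) rather than a transient network problem.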
+ return nil, fmt.Errorf("failed to create a grpc transport to the management server %q: %v", opts.ServerConfig.ServerURI(), err) + } + cc.Connect() + + return &grpcTransport{cc: cc}, nil +} + +type grpcTransport struct { + cc *grpc.ClientConn +} + +func (g *grpcTransport) CreateStreamingCall(ctx context.Context, method string) (transport.StreamingCall, error) { + switch method { + case v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResources_FullMethodName: + return g.newADSStreamingCall(ctx) + case v3lrsgrpc.LoadReportingService_StreamLoadStats_FullMethodName: + return g.newLRSStreamingCall(ctx) + default: + return nil, fmt.Errorf("unsupported method: %v", method) + } +} + +func (g *grpcTransport) newADSStreamingCall(ctx context.Context) (transport.StreamingCall, error) { + newStream := internal.NewADSStream.(func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error)) + stream, err := newStream(ctx, g.cc) + if err != nil { + return nil, fmt.Errorf("failed to create an ADS stream: %v", err) + } + return &adsStream{stream: stream}, nil +} + +func (g *grpcTransport) newLRSStreamingCall(ctx context.Context) (transport.StreamingCall, error) { + stream, err := v3lrsgrpc.NewLoadReportingServiceClient(g.cc).StreamLoadStats(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create an LRS stream: %v", err) + } + return &lrsStream{stream: stream}, nil +} + +func (g *grpcTransport) Close() error { + return g.cc.Close() +} + +type adsStream struct { + stream v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient +} + +func (a *adsStream) Send(msg any) error { + return a.stream.Send(msg.(*v3adspb.DiscoveryRequest)) +} + +func (a *adsStream) Recv() (any, error) { + return a.stream.Recv() +} + +type lrsStream struct { + stream v3lrsgrpc.LoadReportingService_StreamLoadStatsClient +} + +func (l *lrsStream) Send(msg any) error { + return l.stream.Send(msg.(*v3lrspb.LoadStatsRequest)) +} + +func (l *lrsStream) Recv() (any, error) { + return l.stream.Recv() +} diff --git a/xds/internal/xdsclient/transport/grpctransport/grpctransport_ext_test.go b/xds/internal/xdsclient/transport/grpctransport/grpctransport_ext_test.go new file mode 100644 index 000000000000..8831fb8299a7 --- /dev/null +++ b/xds/internal/xdsclient/transport/grpctransport/grpctransport_ext_test.go @@ -0,0 +1,91 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package grpctransport_test + +import ( + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal/grpctest" + internalbootstrap "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/internal" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/transport/grpctransport" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// Tests that the grpctransport.Builder creates a new grpc.ClientConn every time +// Build() is called. +func (s) TestBuild_CustomDialer(t *testing.T) { + // Override the dialer with a custom one. + customDialerCalled := false + origDialer := internal.GRPCNewClient + internal.GRPCNewClient = func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + customDialerCalled = true + return grpc.NewClient(target, opts...) + } + defer func() { internal.GRPCNewClient = origDialer }() + + serverCfg, err := internalbootstrap.ServerConfigForTesting(internalbootstrap.ServerConfigTestingOptions{URI: "server-address"}) + if err != nil { + t.Fatalf("Failed to create server config for testing: %v", err) + } + + // Create a new transport and ensure that the custom dialer was called. + opts := transport.BuildOptions{ServerConfig: serverCfg} + builder := &grpctransport.Builder{} + tr, err := builder.Build(opts) + if err != nil { + t.Fatalf("Builder.Build(%+v) failed: %v", opts, err) + } + defer tr.Close() + + if !customDialerCalled { + t.Fatalf("Builder.Build(%+v): custom dialer called = false, want true", opts) + } + customDialerCalled = false + + // Create another transport and ensure that the custom dialer was called. + tr, err = builder.Build(opts) + if err != nil { + t.Fatalf("Builder.Build(%+v) failed: %v", opts, err) + } + defer tr.Close() + + if !customDialerCalled { + t.Fatalf("Builder.Build(%+v): custom dialer called = false, want true", opts) + } +} + +// Tests that the grpctransport.Builder fails to build a transport when the +// provided BuildOptions do not contain a ServerConfig. +func (s) TestBuild_EmptyServerConfig(t *testing.T) { + builder := &grpctransport.Builder{} + opts := transport.BuildOptions{} + if tr, err := builder.Build(opts); err == nil { + tr.Close() + t.Fatalf("Builder.Build(%+v) succeeded when expected to fail", opts) + } +} diff --git a/xds/internal/xdsclient/transport/lrs/lrs_stream.go b/xds/internal/xdsclient/transport/lrs/lrs_stream.go new file mode 100644 index 000000000000..36e70bc7170f --- /dev/null +++ b/xds/internal/xdsclient/transport/lrs/lrs_stream.go @@ -0,0 +1,333 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package lrs provides the implementation of an LRS (Load Reporting Service) +// stream for the xDS client. 
+package lrs + +import ( + "context" + "fmt" + "io" + "sync" + "time" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + igrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" +) + +// Any per-RPC level logs which print complete request or response messages +// should be gated at this verbosity level. Other per-RPC level logs which print +// terse output should be at `INFO` and verbosity 2. +const perRPCVerbosityLevel = 9 + +// StreamImpl provides all the functionality associated with an LRS (Load Reporting +// Service) stream on the client-side. It manages the lifecycle of the LRS stream, +// including starting, stopping, and retrying the stream. It also provides a +// load.Store that can be used to report load, and a cleanup function that should +// be called when the load reporting is no longer needed. +type StreamImpl struct { + // The following fields are initialized when a Stream instance is created + // and are read-only afterwards, and hence can be accessed without a mutex. + transport transport.Interface // Transport to use for LRS stream. + backoff func(int) time.Duration // Backoff for retries after stream failures. + nodeProto *v3corepb.Node // Identifies the gRPC application. + doneCh chan struct{} // To notify exit of LRS goroutine. + logger *igrpclog.PrefixLogger + + // Guards access to the below fields. + mu sync.Mutex + cancelStream context.CancelFunc // Cancel the stream. If nil, the stream is not active. + refCount int // Number of interested parties. + lrsStore *load.Store // Store returned to user for pushing loads. +} + +// StreamOpts holds the options for creating an lrsStream. +type StreamOpts struct { + Transport transport.Interface // xDS transport to create the stream on. + Backoff func(int) time.Duration // Backoff for retries after stream failures. + NodeProto *v3corepb.Node // Node proto to identify the gRPC application. + LogPrefix string // Prefix to be used for log messages. +} + +// NewStreamImpl creates a new StreamImpl with the provided options. +// +// The actual streaming RPC call is initiated when the first call to ReportLoad +// is made, and is terminated when the last call to ReportLoad is canceled. +func NewStreamImpl(opts StreamOpts) *StreamImpl { + lrs := &StreamImpl{ + transport: opts.Transport, + backoff: opts.Backoff, + nodeProto: opts.NodeProto, + lrsStore: load.NewStore(), + } + + l := grpclog.Component("xds") + lrs.logger = igrpclog.NewPrefixLogger(l, opts.LogPrefix+fmt.Sprintf("[lrs-stream %p] ", lrs)) + return lrs +} + +// ReportLoad returns a load.Store that can be used to report load, and a +// cleanup function that should be called when the load reporting is no longer +// needed. +// +// The first call to ReportLoad sets the reference count to one, and starts the +// LRS streaming call. Subsequent calls increment the reference count and return +// the same load.Store. 
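+//
+// A typical usage sketch (illustrative; not drawn from this package):
+//
+//	store, stop := lrsStream.ReportLoad()
+//	defer stop()
+//	// ... record loads on the returned *load.Store ...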
+// +// The cleanup function decrements the reference count and stops the LRS stream +// when the last reference is removed. +func (lrs *StreamImpl) ReportLoad() (*load.Store, func()) { + lrs.mu.Lock() + defer lrs.mu.Unlock() + + cleanup := grpcsync.OnceFunc(func() { + lrs.mu.Lock() + defer lrs.mu.Unlock() + + if lrs.refCount == 0 { + lrs.logger.Errorf("Attempting to stop already stopped StreamImpl") + return + } + lrs.refCount-- + if lrs.refCount != 0 { + return + } + lrs.cancelStream() + lrs.cancelStream = nil + lrs.logger.Infof("Stopping StreamImpl") + }) + + if lrs.refCount != 0 { + lrs.refCount++ + return lrs.lrsStore, cleanup + } + + lrs.refCount++ + ctx, cancel := context.WithCancel(context.Background()) + lrs.cancelStream = cancel + lrs.doneCh = make(chan struct{}) + go lrs.runner(ctx) + return lrs.lrsStore, cleanup +} + +// runner is responsible for managing the lifetime of an LRS streaming call. It +// creates the stream, sends the initial LoadStatsRequest, receives the first +// LoadStatsResponse, and then starts a goroutine to periodically send +// LoadStatsRequests. The runner will restart the stream if it encounters any +// errors. +func (lrs *StreamImpl) runner(ctx context.Context) { + defer close(lrs.doneCh) + + // This feature indicates that the client supports the + // LoadStatsResponse.send_all_clusters field in the LRS response. + node := proto.Clone(lrs.nodeProto).(*v3corepb.Node) + node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters") + + runLoadReportStream := func() error { + // streamCtx is created and canceled in case we terminate the stream + // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring + // goroutine. + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + + stream, err := lrs.transport.CreateStreamingCall(streamCtx, "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats") + if err != nil { + lrs.logger.Warningf("Failed to create new LRS streaming RPC: %v", err) + return nil + } + if lrs.logger.V(2) { + lrs.logger.Infof("LRS stream created") + } + + if err := lrs.sendFirstLoadStatsRequest(stream, node); err != nil { + lrs.logger.Warningf("Sending first LRS request failed: %v", err) + return nil + } + + clusters, interval, err := lrs.recvFirstLoadStatsResponse(stream) + if err != nil { + lrs.logger.Warningf("Reading from LRS streaming RPC failed: %v", err) + return nil + } + + // We reset backoff state when we successfully receive at least one + // message from the server. + lrs.sendLoads(streamCtx, stream, clusters, interval) + return backoff.ErrResetBackoff + } + backoff.RunF(ctx, runLoadReportStream, lrs.backoff) +} + +// sendLoads is responsible for periodically sending load reports to the LRS +// server at the specified interval for the specified clusters, until the passed +// in context is canceled. 
+func (lrs *StreamImpl) sendLoads(ctx context.Context, stream transport.StreamingCall, clusterNames []string, interval time.Duration) { + tick := time.NewTicker(interval) + defer tick.Stop() + for { + select { + case <-tick.C: + case <-ctx.Done(): + return + } + if err := lrs.sendLoadStatsRequest(stream, lrs.lrsStore.Stats(clusterNames)); err != nil { + lrs.logger.Warningf("Writing to LRS stream failed: %v", err) + return + } + } +} + +func (lrs *StreamImpl) sendFirstLoadStatsRequest(stream transport.StreamingCall, node *v3corepb.Node) error { + req := &v3lrspb.LoadStatsRequest{Node: node} + if lrs.logger.V(perRPCVerbosityLevel) { + lrs.logger.Infof("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) + } + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +// recvFirstLoadStatsResponse receives the first LoadStatsResponse from the LRS +// server. Returns the following: +// - a list of cluster names requested by the server or an empty slice if the +// server requested for load from all clusters +// - the load reporting interval, and +// - any error encountered +func (lrs *StreamImpl) recvFirstLoadStatsResponse(stream transport.StreamingCall) ([]string, time.Duration, error) { + r, err := stream.Recv() + if err != nil { + return nil, 0, fmt.Errorf("lrs: failed to receive first LoadStatsResponse: %v", err) + } + resp, ok := r.(*v3lrspb.LoadStatsResponse) + if !ok { + return nil, time.Duration(0), fmt.Errorf("lrs: unexpected message type %T", r) + } + if lrs.logger.V(perRPCVerbosityLevel) { + lrs.logger.Infof("Received first LoadStatsResponse: %s", pretty.ToJSON(resp)) + } + + internal := resp.GetLoadReportingInterval() + if internal.CheckValid() != nil { + return nil, 0, fmt.Errorf("lrs: invalid load_reporting_interval: %v", err) + } + loadReportingInterval := internal.AsDuration() + + clusters := resp.Clusters + if resp.SendAllClusters { + // Return an empty slice to send stats for all clusters. 
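+		// An empty (non-nil) slice is later passed to load.Store.Stats, which
+		// is expected to report stats for every tracked cluster in that case,
+		// mirroring the server's send_all_clusters request.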
+ clusters = []string{} + } + + return clusters, loadReportingInterval, nil +} + +func (lrs *StreamImpl) sendLoadStatsRequest(stream transport.StreamingCall, loads []*load.Data) error { + clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) + for _, sd := range loads { + droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) + for category, count := range sd.Drops { + droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ + Category: category, + DroppedCount: count, + }) + } + localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) + for l, localityData := range sd.LocalityStats { + lid, err := internal.LocalityIDFromString(l) + if err != nil { + return err + } + loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) + for name, loadData := range localityData.LoadStats { + loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ + MetricName: name, + NumRequestsFinishedWithMetric: loadData.Count, + TotalMetricValue: loadData.Sum, + }) + } + localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ + Locality: &v3corepb.Locality{ + Region: lid.Region, + Zone: lid.Zone, + SubZone: lid.SubZone, + }, + TotalSuccessfulRequests: localityData.RequestStats.Succeeded, + TotalRequestsInProgress: localityData.RequestStats.InProgress, + TotalErrorRequests: localityData.RequestStats.Errored, + TotalIssuedRequests: localityData.RequestStats.Issued, + LoadMetricStats: loadMetricStats, + UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. + }) + } + + clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ + ClusterName: sd.Cluster, + ClusterServiceName: sd.Service, + UpstreamLocalityStats: localityStats, + TotalDroppedRequests: sd.TotalDrops, + DroppedRequests: droppedReqs, + LoadReportInterval: durationpb.New(sd.ReportInterval), + }) + } + + req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} + if lrs.logger.V(perRPCVerbosityLevel) { + lrs.logger.Infof("Sending LRS loads: %s", pretty.ToJSON(req)) + } + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream transport.StreamingCall) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } +} + +// Stop blocks until the stream is closed and all spawned goroutines exit. +func (lrs *StreamImpl) Stop() { + lrs.mu.Lock() + defer lrs.mu.Unlock() + + if lrs.cancelStream == nil { + return + } + lrs.cancelStream() + lrs.cancelStream = nil + lrs.logger.Infof("Stopping LRS stream") + <-lrs.doneCh +} diff --git a/xds/internal/xdsclient/transport/transport_interface.go b/xds/internal/xdsclient/transport/transport_interface.go new file mode 100644 index 000000000000..db8a19931916 --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_interface.go @@ -0,0 +1,67 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package transport defines the interface that describe the functionality +// required to communicate with an xDS server using streaming calls. +package transport + +import ( + "context" + + "google.golang.org/grpc/internal/xds/bootstrap" +) + +// Builder is an interface for building a new xDS transport. +type Builder interface { + // Build creates a new xDS transport with the provided options. + Build(opts BuildOptions) (Transport, error) +} + +// BuildOptions contains the options for building a new xDS transport. +type BuildOptions struct { + // ServerConfig contains the configuration that controls how the transport + // interacts with the XDS server. This includes the server URI and the + // credentials to use to connect to the server, among other things. + ServerConfig *bootstrap.ServerConfig +} + +// Interface provides the functionality to communicate with an XDS server using +// streaming calls. +// +// TODO(easwars): Rename this to Transport once the existing Transport type is +// removed. +type Interface interface { + // CreateStreamingCall creates a new streaming call to the XDS server for the + // specified method name. The returned StreamingCall interface can be used to + // send and receive messages on the stream. + CreateStreamingCall(context.Context, string) (StreamingCall, error) + + // Close closes the underlying connection and cleans up any resources used by the + // Transport. + Close() error +} + +// StreamingCall is an interface that provides a way to send and receive +// messages on a stream. The methods accept or return any.Any messages instead +// of concrete types to allow this interface to be used for both ADS and LRS. +type StreamingCall interface { + // Send sends the provided message on the stream. + Send(any) error + + // Recv block until the next message is received on the stream. + Recv() (any, error) +} From 830135e6c5a351abf75f0c9cfdf978e5df8daeba Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 16 Oct 2024 20:56:53 -0700 Subject: [PATCH 15/57] xdsclient: new Transport interface and ADS stream implementation (#7721) --- xds/internal/xdsclient/internal/internal.go | 6 + .../xdsclient/transport/ads/ads_stream.go | 804 ++++++++++++++++++ .../grpctransport/grpctransport_ext_test.go | 2 +- .../transport/transport_interface.go | 8 +- 4 files changed, 815 insertions(+), 5 deletions(-) create mode 100644 xds/internal/xdsclient/transport/ads/ads_stream.go diff --git a/xds/internal/xdsclient/internal/internal.go b/xds/internal/xdsclient/internal/internal.go index b66697206c08..6301b2b2be47 100644 --- a/xds/internal/xdsclient/internal/internal.go +++ b/xds/internal/xdsclient/internal/internal.go @@ -25,4 +25,10 @@ var ( // NewADSStream returns a new ADS stream. NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) + + // ResourceWatchStateForTesting gets the watch state for the resource + // identified by the given resource type and resource name. Returns a + // non-nil error if there is no such resource being watched. + ResourceWatchStateForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error + ) diff --git a/xds/internal/xdsclient/transport/ads/ads_stream.go b/xds/internal/xdsclient/transport/ads/ads_stream.go new file mode 100644 index 000000000000..a3a17144d711 --- /dev/null +++ b/xds/internal/xdsclient/transport/ads/ads_stream.go @@ -0,0 +1,804 @@ +/* + * + * Copyright 2024 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package ads provides the implementation of an ADS (Aggregated Discovery +// Service) stream for the xDS client. +package ads + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/buffer" + igrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +// Any per-RPC level logs which print complete request or response messages +// should be gated at this verbosity level. Other per-RPC level logs which print +// terse output should be at `INFO` and verbosity 2. +const perRPCVerbosityLevel = 9 + +// Response represents a response received on the ADS stream. It contains the +// type URL, version, and resources for the response. +type Response struct { + TypeURL string + Version string + Resources []*anypb.Any +} + +// StreamEventHandler is an interface that defines the callbacks for events that +// occur on the ADS stream. Methods on this interface may be invoked +// concurrently and implementations need to handle them in a thread-safe manner. +type StreamEventHandler interface { + OnADSStreamError(error) // Called when the ADS stream breaks. + OnADSWatchExpiry(xdsresource.Type, string) // Called when the watch timer expires for a resource. + OnADSResponse(Response, func()) ([]string, error) // Called when a response is received on the ADS stream. +} + +// WatchState is a enum that describes the watch state of a particular +// resource. +type WatchState int + +const ( + // ResourceWatchStateStarted is the state where a watch for a resource was + // started, but a request asking for that resource is yet to be sent to the + // management server. + ResourceWatchStateStarted WatchState = iota + // ResourceWatchStateRequested is the state when a request has been sent for + // the resource being watched. + ResourceWatchStateRequested + // ResourceWatchStateReceived is the state when a response has been received + // for the resource being watched. + ResourceWatchStateReceived + // ResourceWatchStateTimeout is the state when the watch timer associated + // with the resource expired because no response was received. + ResourceWatchStateTimeout +) + +// ResourceWatchState is the state corresponding to a resource being watched. +type ResourceWatchState struct { + State WatchState // Watch state of the resource. + ExpiryTimer *time.Timer // Timer for the expiry of the watch. +} + +// State corresponding to a resource type. 
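+// An entry is created lazily, on the first subscription for a resource of that
+// type. Its version field (the last ACKed version) is retained across stream
+// restarts, while the nonce is reset when the stream breaks.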
+type resourceTypeState struct { + version string // Last acked version. Should not be reset when the stream breaks. + nonce string // Last received nonce. Should be reset when the stream breaks. + bufferedRequests chan struct{} // Channel to buffer requests when writing is blocked. + subscribedResources map[string]*ResourceWatchState // Map of subscribed resource names to their state. +} + +// StreamImpl provides the functionality associated with an ADS (Aggregated +// Discovery Service) stream on the client side. It manages the lifecycle of the +// ADS stream, including creating the stream, sending requests, and handling +// responses. It also handles flow control and retries for the stream. +type StreamImpl struct { + // The following fields are initialized from arguments passed to the + // constructor and are read-only afterwards, and hence can be accessed + // without a mutex. + transport transport.Interface // Transport to use for ADS stream. + eventHandler StreamEventHandler // Callbacks into the xdsChannel. + backoff func(int) time.Duration // Backoff for retries, after stream failures. + nodeProto *v3corepb.Node // Identifies the gRPC application. + watchExpiryTimeout time.Duration // Resource watch expiry timeout + logger *igrpclog.PrefixLogger + + // The following fields are initialized in the constructor and are not + // written to afterwards, and hence can be accessed without a mutex. + streamCh chan transport.StreamingCall // New ADS streams are pushed here. + requestCh *buffer.Unbounded // Subscriptions and unsubscriptions are pushed here. + runnerDoneCh chan struct{} // Notify completion of runner goroutine. + cancel context.CancelFunc // To cancel the context passed to the runner goroutine. + + // Guards access to the below fields (and to the contents of the map). + mu sync.Mutex + resourceTypeState map[xdsresource.Type]*resourceTypeState // Map of resource types to their state. + fc *adsFlowControl // Flow control for ADS stream. + firstRequest bool // False after the first request is sent out. +} + +// StreamOpts contains the options for creating a new ADS Stream. +type StreamOpts struct { + Transport transport.Interface // xDS transport to create the stream on. + EventHandler StreamEventHandler // Callbacks for stream events. + Backoff func(int) time.Duration // Backoff for retries, after stream failures. + NodeProto *v3corepb.Node // Node proto to identify the gRPC application. + WatchExpiryTimeout time.Duration // Resource watch expiry timeout. + LogPrefix string // Prefix to be used for log messages. +} + +// NewStreamImpl initializes a new StreamImpl instance using the given +// parameters. It also launches goroutines responsible for managing reads and +// writes for messages of the underlying stream. +func NewStreamImpl(opts StreamOpts) *StreamImpl { + s := &StreamImpl{ + transport: opts.Transport, + eventHandler: opts.EventHandler, + backoff: opts.Backoff, + nodeProto: opts.NodeProto, + watchExpiryTimeout: opts.WatchExpiryTimeout, + + streamCh: make(chan transport.StreamingCall, 1), + requestCh: buffer.NewUnbounded(), + runnerDoneCh: make(chan struct{}), + resourceTypeState: make(map[xdsresource.Type]*resourceTypeState), + } + + l := grpclog.Component("xds") + s.logger = igrpclog.NewPrefixLogger(l, opts.LogPrefix+fmt.Sprintf("[ads-stream %p] ", s)) + + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + go s.runner(ctx) + return s +} + +// Stop blocks until the stream is closed and all spawned goroutines exit. 
+func (s *StreamImpl) Stop() { + s.cancel() + s.requestCh.Close() + <-s.runnerDoneCh + s.logger.Infof("Stopping ADS stream") +} + +// Subscribe subscribes to the given resource. It is assumed that multiple +// subscriptions for the same resource is deduped at the caller. A discovery +// request is sent out on the underlying stream for the resource type when there +// is sufficient flow control quota. +func (s *StreamImpl) Subscribe(typ xdsresource.Type, name string) { + if s.logger.V(2) { + s.logger.Infof("Subscribing to resource %q of type %q", name, typ.TypeName()) + } + + s.mu.Lock() + defer s.mu.Unlock() + + state, ok := s.resourceTypeState[typ] + if !ok { + // An entry in the type state map is created as part of the first + // subscription request for this type. + state = &resourceTypeState{ + subscribedResources: make(map[string]*ResourceWatchState), + bufferedRequests: make(chan struct{}, 1), + } + s.resourceTypeState[typ] = state + } + + // Create state for the newly subscribed resource. The watch timer will + // be started when a request for this resource is actually sent out. + state.subscribedResources[name] = &ResourceWatchState{State: ResourceWatchStateStarted} + + // Send a request for the resource type with updated subscriptions. + s.requestCh.Put(typ) +} + +// Unsubscribe cancels the subscription to the given resource. It is a no-op if +// the given resource does not exist. The watch expiry timer associated with the +// resource is stopped if one is active. A discovery request is sent out on the +// stream for the resource type when there is sufficient flow control quota. +func (s *StreamImpl) Unsubscribe(typ xdsresource.Type, name string) { + if s.logger.V(2) { + s.logger.Infof("Unsubscribing to resource %q of type %q", name, typ.TypeName()) + } + + s.mu.Lock() + defer s.mu.Unlock() + + state, ok := s.resourceTypeState[typ] + if !ok { + return + } + + rs, ok := state.subscribedResources[name] + if !ok { + return + } + if rs.ExpiryTimer != nil { + rs.ExpiryTimer.Stop() + } + delete(state.subscribedResources, name) + + // Send a request for the resource type with updated subscriptions. + s.requestCh.Put(typ) +} + +// runner is a long-running goroutine that handles the lifecycle of the ADS +// stream. It spwans another goroutine to handle writes of discovery request +// messages on the stream. Whenever an existing stream fails, it performs +// exponential backoff (if no messages were received on that stream) before +// creating a new stream. +func (s *StreamImpl) runner(ctx context.Context) { + defer close(s.runnerDoneCh) + + go s.send(ctx) + + runStreamWithBackoff := func() error { + stream, err := s.transport.CreateStreamingCall(ctx, "/envoy.service.discovery.v3.AggregatedDiscoveryService/StreamAggregatedResources") + if err != nil { + s.logger.Warningf("Failed to create a new ADS streaming RPC: %v", err) + s.onError(err, false) + return nil + } + if s.logger.V(2) { + s.logger.Infof("ADS stream created") + } + + s.mu.Lock() + // Flow control is a property of the underlying streaming RPC call and + // needs to be initialized everytime a new one is created. + s.fc = newADSFlowControl(s.logger) + s.firstRequest = true + s.mu.Unlock() + + // Ensure that the most recently created stream is pushed on the + // channel for the `send` goroutine to consume. + select { + case <-s.streamCh: + default: + } + s.streamCh <- stream + + // Backoff state is reset upon successful receipt of at least one + // message from the server. 
+		if s.recv(ctx, stream) {
+			return backoff.ErrResetBackoff
+		}
+		return nil
+	}
+	backoff.RunF(ctx, runStreamWithBackoff, s.backoff)
+}
+
+// send is a long-running goroutine that handles sending discovery requests for
+// two scenarios:
+//   - a new subscription or unsubscription request is received
+//   - a new stream is created after the previous one failed
+func (s *StreamImpl) send(ctx context.Context) {
+	// Stores the most recent stream instance received on streamCh.
+	var stream transport.StreamingCall
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case stream = <-s.streamCh:
+			if err := s.sendExisting(stream); err != nil {
+				// Send failed, clear the current stream. An attempt to resend
+				// will only be made after a new stream is created.
+				stream = nil
+				continue
+			}
+		case req, ok := <-s.requestCh.Get():
+			if !ok {
+				return
+			}
+			s.requestCh.Load()
+
+			typ := req.(xdsresource.Type)
+			if err := s.sendNew(stream, typ); err != nil {
+				stream = nil
+				continue
+			}
+		}
+	}
+}
+
+// sendNew attempts to send a discovery request based on a new subscription or
+// unsubscription. If there is no flow control quota, the request is buffered
+// and will be sent later. This method also starts the watch expiry timer for
+// resources that were sent in the request for the first time, i.e. their watch
+// state is `ResourceWatchStateStarted`.
+func (s *StreamImpl) sendNew(stream transport.StreamingCall, typ xdsresource.Type) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	// If there's no stream yet, skip the request. This request will be resent
+	// when a new stream is created. If no stream is created, the watcher will
+	// time out (same as server not sending response back).
+	if stream == nil {
+		return nil
+	}
+
+	// If local processing of the most recently received response is not yet
+	// complete, i.e. fc.pending == true, queue this write and return early.
+	// This allows us to batch writes for requests which are generated as part
+	// of local processing of a received response.
+	state := s.resourceTypeState[typ]
+	if s.fc.pending.Load() {
+		select {
+		case state.bufferedRequests <- struct{}{}:
+		default:
+		}
+		return nil
+	}
+
+	names := resourceNames(state.subscribedResources)
+	if err := s.sendMessageLocked(stream, names, typ.TypeURL(), state.version, state.nonce, nil); err != nil {
+		return err
+	}
+	select {
+	case <-state.bufferedRequests:
+	default:
+	}
+	s.startWatchTimersLocked(typ, names)
+	return nil
+}
+
+// sendExisting sends out discovery requests for existing resources when
+// recovering from a broken stream.
+//
+// The stream argument is guaranteed to be non-nil.
+func (s *StreamImpl) sendExisting(stream transport.StreamingCall) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for typ, state := range s.resourceTypeState {
+		// Reset only the nonces map when the stream restarts.
+		//
+		// xDS spec says the following. See section:
+		// https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version
+		//
+		// Note that the version for a resource type is not a property of an
+		// individual xDS stream but rather a property of the resources
+		// themselves. If the stream becomes broken and the client creates a new
+		// stream, the client’s initial request on the new stream should
+		// indicate the most recent version seen by the client on the previous
+		// stream.
+		state.nonce = ""
+
+		if len(state.subscribedResources) == 0 {
+			continue
+		}
+
+		names := resourceNames(state.subscribedResources)
+		if s.logger.V(2) {
+			s.logger.Infof("Re-requesting resources %v of type %q, as the stream has been recreated", names, typ.TypeURL())
+		}
+		if err := s.sendMessageLocked(stream, names, typ.TypeURL(), state.version, state.nonce, nil); err != nil {
+			return err
+		}
+		select {
+		case <-state.bufferedRequests:
+		default:
+		}
+		s.startWatchTimersLocked(typ, names)
+	}
+	return nil
+}
+
+// sendBuffered sends out discovery requests for resources that were buffered
+// when they were subscribed to, because local processing of the previously
+// received response was not yet complete.
+//
+// The stream argument is guaranteed to be non-nil.
+func (s *StreamImpl) sendBuffered(stream transport.StreamingCall) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for typ, state := range s.resourceTypeState {
+		select {
+		case <-state.bufferedRequests:
+			names := resourceNames(state.subscribedResources)
+			if err := s.sendMessageLocked(stream, names, typ.TypeURL(), state.version, state.nonce, nil); err != nil {
+				return err
+			}
+			s.startWatchTimersLocked(typ, names)
+		default:
+			// No buffered request.
+			continue
+		}
+	}
+	return nil
+}
+
+// sendMessageLocked sends a discovery request to the server, populating the
+// different fields of the message with the given parameters. Returns a non-nil
+// error if the request could not be sent.
+//
+// Caller needs to hold s.mu.
+func (s *StreamImpl) sendMessageLocked(stream transport.StreamingCall, names []string, url, version, nonce string, nackErr error) error {
+	req := &v3discoverypb.DiscoveryRequest{
+		ResourceNames: names,
+		TypeUrl:       url,
+		VersionInfo:   version,
+		ResponseNonce: nonce,
+	}
+
+	// The xDS protocol only requires that we send the node proto in the first
+	// discovery request on every stream. Sending the node proto in every
+	// request wastes CPU resources on the client and the server.
+	if s.firstRequest {
+		req.Node = s.nodeProto
+	}
+
+	if nackErr != nil {
+		req.ErrorDetail = &statuspb.Status{
+			Code: int32(codes.InvalidArgument), Message: nackErr.Error(),
+		}
+	}
+
+	if err := stream.Send(req); err != nil {
+		s.logger.Warningf("Sending ADS request for type %q, resources: %v, version: %q, nonce: %q failed: %v", url, names, version, nonce, err)
+		return err
+	}
+	s.firstRequest = false
+
+	if s.logger.V(perRPCVerbosityLevel) {
+		s.logger.Infof("ADS request sent: %v", pretty.ToJSON(req))
+	} else if s.logger.V(2) {
+		s.logger.Infof("ADS request sent for type %q, resources: %v, version: %q, nonce: %q", url, names, version, nonce)
+	}
+	return nil
+}
+
+// recv is responsible for receiving messages from the ADS stream.
+//
+// It performs the following actions:
+//   - Waits for local flow control to be available before sending buffered
+//     requests, if any.
+//   - Receives a message from the ADS stream. If an error is encountered here,
+//     it is handled by the onError method which propagates the error to all
+//     watchers.
+//   - Invokes the event handler's OnADSResponse method to process the message.
+//   - Sends an ACK or NACK to the server based on the response.
+//
+// It returns a boolean indicating whether at least one message was received
+// from the server.
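+//
+// Note that the error returned by sendBuffered is not checked here: if that
+// write fails, the underlying stream is broken, and the failure is expected to
+// surface as an error from the next Recv call on the same stream.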
+func (s *StreamImpl) recv(ctx context.Context, stream transport.StreamingCall) bool { + msgReceived := false + for { + // Wait for ADS stream level flow control to be available, and send out + // a request if anything was buffered while we were waiting for local + // processing of the previous response to complete. + if !s.fc.wait(ctx) { + if s.logger.V(2) { + s.logger.Infof("ADS stream context canceled") + } + return msgReceived + } + s.sendBuffered(stream) + + resources, url, version, nonce, err := s.recvMessage(stream) + if err != nil { + s.onError(err, msgReceived) + s.logger.Warningf("ADS stream closed: %v", err) + return msgReceived + } + msgReceived = true + + // Invoke the onResponse event handler to parse the incoming message and + // decide whether to send an ACK or NACK. + resp := Response{ + Resources: resources, + TypeURL: url, + Version: version, + } + var resourceNames []string + var nackErr error + s.fc.setPending() + resourceNames, nackErr = s.eventHandler.OnADSResponse(resp, s.fc.onDone) + if xdsresource.ErrType(nackErr) == xdsresource.ErrorTypeResourceTypeUnsupported { + // Based on gRFC A27, a general guiding principle is that if the + // server sends something the client didn't actually subscribe to, + // then the client ignores it. Here, we have received a response + // with resources of a type that we don't know about. + // + // Sending a NACK doesn't really seem appropriate here, since we're + // not actually validating what the server sent and therefore don't + // know that it's invalid. But we shouldn't ACK either, because we + // don't know that it is valid. + s.logger.Warningf("%v", nackErr) + continue + } + + s.onRecv(stream, resourceNames, url, version, nonce, nackErr) + } +} + +func (s *StreamImpl) recvMessage(stream transport.StreamingCall) (resources []*anypb.Any, url, version, nonce string, err error) { + r, err := stream.Recv() + if err != nil { + return nil, "", "", "", err + } + resp, ok := r.(*v3discoverypb.DiscoveryResponse) + if !ok { + s.logger.Infof("Message received on ADS stream of unexpected type: %T", r) + return nil, "", "", "", fmt.Errorf("unexpected message type %T", r) + } + + if s.logger.V(perRPCVerbosityLevel) { + s.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) + } else if s.logger.V(2) { + s.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) + } + return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil +} + +// onRecv is invoked when a response is received from the server. The arguments +// passed to this method correspond to the most recently received response. +// +// It performs the following actions: +// - updates resource type specific state +// - updates resource specific state for resources in the response +// - sends an ACK or NACK to the server based on the response +func (s *StreamImpl) onRecv(stream transport.StreamingCall, names []string, url, version, nonce string, nackErr error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Lookup the resource type specific state based on the type URL. + var typ xdsresource.Type + for t := range s.resourceTypeState { + if t.TypeURL() == url { + typ = t + break + } + } + typeState, ok := s.resourceTypeState[typ] + if !ok { + s.logger.Warningf("ADS stream received a response for type %q, but no state exists for it", url) + return + } + + // Update the resource type specific state. 
+	// This includes:
+	// - updating the nonce unconditionally
+	// - updating the version only if the response is to be ACKed
+	previousVersion := typeState.version
+	typeState.nonce = nonce
+	if nackErr == nil {
+		typeState.version = version
+	}
+
+	// Update the resource specific state. For all resources received as
+	// part of this response that are in state `started` or `requested`,
+	// this includes:
+	// - setting the watch state to ResourceWatchStateReceived
+	// - stopping the expiry timer, if one exists
+	for _, name := range names {
+		rs, ok := typeState.subscribedResources[name]
+		if !ok {
+			s.logger.Warningf("ADS stream received a response for resource %q, but no state exists for it", name)
+			continue
+		}
+		if ws := rs.State; ws == ResourceWatchStateStarted || ws == ResourceWatchStateRequested {
+			rs.State = ResourceWatchStateReceived
+			if rs.ExpiryTimer != nil {
+				rs.ExpiryTimer.Stop()
+				rs.ExpiryTimer = nil
+			}
+		}
+	}
+
+	// Send an ACK or NACK.
+	subscribedResourceNames := resourceNames(typeState.subscribedResources)
+	if nackErr != nil {
+		s.logger.Warningf("Sending NACK for resource type: %q, version: %q, nonce: %q, reason: %v", url, version, nonce, nackErr)
+		s.sendMessageLocked(stream, subscribedResourceNames, url, previousVersion, nonce, nackErr)
+		return
+	}
+
+	if s.logger.V(2) {
+		s.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, version, nonce)
+	}
+	s.sendMessageLocked(stream, subscribedResourceNames, url, version, nonce, nil)
+}
+
+// onError is called when an error occurs on the ADS stream. It stops any
+// outstanding resource timers and resets the watch state to started for any
+// resources that were in the requested state. It also handles the case where
+// the ADS stream was closed after receiving a response, which is not
+// considered an error.
+func (s *StreamImpl) onError(err error, msgReceived bool) {
+	// For resources that have been requested but not yet responded to by the
+	// management server, stop the resource timers and reset the watch state to
+	// ResourceWatchStateStarted. This is because we don't want the expiry
+	// timer to be running when we don't have a stream open to the management
+	// server.
+	s.mu.Lock()
+	for _, state := range s.resourceTypeState {
+		for _, rs := range state.subscribedResources {
+			if rs.State != ResourceWatchStateRequested {
+				continue
+			}
+			if rs.ExpiryTimer != nil {
+				rs.ExpiryTimer.Stop()
+				rs.ExpiryTimer = nil
+			}
+			rs.State = ResourceWatchStateStarted
+		}
+	}
+	s.mu.Unlock()
+
+	// Note that we do not consider it an error if the ADS stream was closed
+	// after having received a response on the stream. This is because there
+	// are legitimate reasons why the server may need to close the stream during
+	// normal operations, such as needing to rebalance load or the underlying
+	// connection hitting its max connection age limit.
+	// (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)).
+	if msgReceived {
+		err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error())
+	}
+
+	s.eventHandler.OnADSStreamError(err)
+}
+
+// startWatchTimersLocked starts the expiry timers for the given resource names
+// of the specified resource type. For each resource name, if the resource
+// watch state is in the "started" state, it transitions the state to
+// "requested" and starts an expiry timer. When the timer expires, the resource
+// watch state is set to "timeout" and the event handler callback is called.
+//
+// The caller must hold the s.mu lock.
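+//
+// The expiry callback runs on the timer's goroutine: it marks the resource
+// watch as timed out and clears the timer while holding s.mu, and then invokes
+// the event handler's OnADSWatchExpiry callback without holding the lock.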
+func (s *StreamImpl) startWatchTimersLocked(typ xdsresource.Type, names []string) { + typeState := s.resourceTypeState[typ] + for _, name := range names { + resourceState, ok := typeState.subscribedResources[name] + if !ok { + continue + } + if resourceState.State != ResourceWatchStateStarted { + continue + } + resourceState.State = ResourceWatchStateRequested + + rs := resourceState + resourceState.ExpiryTimer = time.AfterFunc(s.watchExpiryTimeout, func() { + s.mu.Lock() + rs.State = ResourceWatchStateTimeout + rs.ExpiryTimer = nil + s.mu.Unlock() + s.eventHandler.OnADSWatchExpiry(typ, name) + }) + } +} + +func resourceNames(m map[string]*ResourceWatchState) []string { + ret := make([]string, len(m)) + idx := 0 + for name := range m { + ret[idx] = name + idx++ + } + return ret +} + +// TriggerResourceNotFoundForTesting triggers a resource not found event for the +// given resource type and name. This is intended for testing purposes only, to +// simulate a resource not found scenario. +func (s *StreamImpl) TriggerResourceNotFoundForTesting(typ xdsresource.Type, resourceName string) { + s.mu.Lock() + + state, ok := s.resourceTypeState[typ] + if !ok { + s.mu.Unlock() + return + } + resourceState, ok := state.subscribedResources[resourceName] + if !ok { + s.mu.Unlock() + return + } + + if s.logger.V(2) { + s.logger.Infof("Triggering resource not found for type: %s, resource name: %s", typ.TypeName(), resourceName) + } + resourceState.State = ResourceWatchStateTimeout + if resourceState.ExpiryTimer != nil { + resourceState.ExpiryTimer.Stop() + resourceState.ExpiryTimer = nil + } + s.mu.Unlock() + go s.eventHandler.OnADSWatchExpiry(typ, resourceName) +} + +// ResourceWatchStateForTesting returns the ResourceWatchState for the given +// resource type and name. This is intended for testing purposes only, to +// inspect the internal state of the ADS stream. +func (s *StreamImpl) ResourceWatchStateForTesting(typ xdsresource.Type, resourceName string) (ResourceWatchState, error) { + s.mu.Lock() + defer s.mu.Unlock() + + state, ok := s.resourceTypeState[typ] + if !ok { + return ResourceWatchState{}, fmt.Errorf("unknown resource type: %v", typ) + } + resourceState, ok := state.subscribedResources[resourceName] + if !ok { + return ResourceWatchState{}, fmt.Errorf("unknown resource name: %v", resourceName) + } + return *resourceState, nil +} + +// adsFlowControl implements ADS stream level flow control that enables the +// transport to block the reading of the next message off of the stream until +// the previous update is consumed by all watchers. +// +// The lifetime of the flow control is tied to the lifetime of the stream. +type adsFlowControl struct { + logger *igrpclog.PrefixLogger + + // Whether the most recent update is pending consumption by all watchers. + pending atomic.Bool + // Channel used to notify when all the watchers have consumed the most + // recent update. Wait() blocks on reading a value from this channel. + readyCh chan struct{} +} + +// newADSFlowControl returns a new adsFlowControl. +func newADSFlowControl(logger *igrpclog.PrefixLogger) *adsFlowControl { + return &adsFlowControl{ + logger: logger, + readyCh: make(chan struct{}, 1), + } +} + +// setPending changes the internal state to indicate that there is an update +// pending consumption by all watchers. +func (fc *adsFlowControl) setPending() { + fc.pending.Store(true) +} + +// wait blocks until all the watchers have consumed the most recent update and +// returns true. If the context expires before that, it returns false. 
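+//
+// readyCh is buffered with a capacity of one so that a completion signaled by
+// onDone just before wait blocks on the channel is not dropped by onDone's
+// non-blocking send. The drain below discards a stale signal left over from an
+// update whose completion was never waited on.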
+func (fc *adsFlowControl) wait(ctx context.Context) bool { + // If there is no pending update, there is no need to block. + if !fc.pending.Load() { + // If all watchers finished processing the most recent update before the + // `recv` goroutine made the next call to `Wait()`, there would be an + // entry in the readyCh channel that needs to be drained to ensure that + // the next call to `Wait()` doesn't unblock before it actually should. + select { + case <-fc.readyCh: + default: + } + return true + } + + select { + case <-ctx.Done(): + return false + case <-fc.readyCh: + return true + } +} + +// onDone indicates that all watchers have consumed the most recent update. +func (fc *adsFlowControl) onDone() { + fc.pending.Store(false) + + select { + // Writes to the readyCh channel should not block ideally. The default + // branch here is to appease the paranoid mind. + case fc.readyCh <- struct{}{}: + default: + if fc.logger.V(2) { + fc.logger.Infof("ADS stream flow control readyCh is full") + } + } +} diff --git a/xds/internal/xdsclient/transport/grpctransport/grpctransport_ext_test.go b/xds/internal/xdsclient/transport/grpctransport/grpctransport_ext_test.go index 8831fb8299a7..2e375f0b5ac1 100644 --- a/xds/internal/xdsclient/transport/grpctransport/grpctransport_ext_test.go +++ b/xds/internal/xdsclient/transport/grpctransport/grpctransport_ext_test.go @@ -1,6 +1,6 @@ /* * - * Copyright 2022 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/xds/internal/xdsclient/transport/transport_interface.go b/xds/internal/xdsclient/transport/transport_interface.go index db8a19931916..6a33e449531f 100644 --- a/xds/internal/xdsclient/transport/transport_interface.go +++ b/xds/internal/xdsclient/transport/transport_interface.go @@ -28,24 +28,24 @@ import ( // Builder is an interface for building a new xDS transport. type Builder interface { // Build creates a new xDS transport with the provided options. - Build(opts BuildOptions) (Transport, error) + Build(opts BuildOptions) (Interface, error) } // BuildOptions contains the options for building a new xDS transport. type BuildOptions struct { // ServerConfig contains the configuration that controls how the transport - // interacts with the XDS server. This includes the server URI and the + // interacts with the xDS server. This includes the server URI and the // credentials to use to connect to the server, among other things. ServerConfig *bootstrap.ServerConfig } -// Interface provides the functionality to communicate with an XDS server using +// Interface provides the functionality to communicate with an xDS server using // streaming calls. // // TODO(easwars): Rename this to Transport once the existing Transport type is // removed. type Interface interface { - // CreateStreamingCall creates a new streaming call to the XDS server for the + // CreateStreamingCall creates a new streaming call to the xDS server for the // specified method name. The returned StreamingCall interface can be used to // send and receive messages on the stream. 
CreateStreamingCall(context.Context, string) (StreamingCall, error) From 56df169480cdb4928a24a50b5289f909f0d81ba7 Mon Sep 17 00:00:00 2001 From: Purnesh Dixit Date: Thu, 17 Oct 2024 22:00:36 +0530 Subject: [PATCH 16/57] resolver: update ReportError() docstring (#7732) --- resolver/resolver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resolver/resolver.go b/resolver/resolver.go index 202854511b81..96294ac3f444 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -237,8 +237,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. + // error. The ClientConn then forwards this error to the load balancing + // policy. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. From 98959d9a4904e98bbf8b423ce6a3cb5d36f90ee1 Mon Sep 17 00:00:00 2001 From: Purnesh Dixit Date: Fri, 18 Oct 2024 21:07:37 +0530 Subject: [PATCH 17/57] deps: update dependencies for all modules (#7755) * Update gRPC-Go's dependency versions on master * update protos * disabled redefines-builtin-id lint rule --- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 202 +--- binarylog/grpc_binarylog_v1/binarylog.pb.go | 180 +--- channelz/grpc_channelz_v1/channelz.pb.go | 862 ++++-------------- cmd/protoc-gen-go-grpc/go.mod | 2 +- cmd/protoc-gen-go-grpc/go.sum | 4 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 26 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 224 +---- .../grpc_gcp/transport_security_common.pb.go | 48 +- examples/features/proto/echo/echo.pb.go | 48 +- examples/go.mod | 92 +- examples/go.sum | 189 ++-- .../helloworld/helloworld/helloworld.pb.go | 48 +- .../route_guide/routeguide/route_guide.pb.go | 114 +-- gcp/observability/go.mod | 70 +- gcp/observability/go.sum | 157 ++-- go.mod | 18 +- go.sum | 36 +- health/grpc_health_v1/health.pb.go | 48 +- internal/proto/grpc_lookup_v1/rls.pb.go | 48 +- .../proto/grpc_lookup_v1/rls_config.pb.go | 158 +--- interop/grpc_testing/benchmark_service.pb.go | 2 +- interop/grpc_testing/control.pb.go | 422 ++------- interop/grpc_testing/core/stats.pb.go | 92 +- interop/grpc_testing/empty.pb.go | 26 +- interop/grpc_testing/messages.pb.go | 642 +++---------- interop/grpc_testing/payloads.pb.go | 92 +- .../report_qps_scenario_service.pb.go | 2 +- interop/grpc_testing/stats.pb.go | 114 +-- interop/grpc_testing/test.pb.go | 2 +- interop/grpc_testing/worker_service.pb.go | 2 +- interop/observability/go.mod | 70 +- interop/observability/go.sum | 157 ++-- interop/stress/grpc_testing/metrics.pb.go | 70 +- interop/xds/go.mod | 44 +- interop/xds/go.sum | 80 +- profiling/proto/service.pb.go | 136 +-- .../grpc_reflection_v1/reflection.pb.go | 180 +--- .../grpc_reflection_v1alpha/reflection.pb.go | 180 +--- reflection/grpc_testing/proto2.pb.go | 28 +- reflection/grpc_testing/proto2_ext.pb.go | 26 +- reflection/grpc_testing/proto2_ext2.pb.go | 26 +- reflection/grpc_testing/test.pb.go | 70 +- reflection/test/go.mod | 12 +- reflection/test/go.sum | 20 +- scripts/revive.toml | 4 +- security/advancedtls/examples/go.mod | 16 +- security/advancedtls/examples/go.sum | 24 +- security/advancedtls/go.mod | 14 +- security/advancedtls/go.sum | 24 +- stats/opencensus/go.mod | 12 +- stats/opencensus/go.sum | 39 +- stats/opentelemetry/go.mod | 34 +- stats/opentelemetry/go.sum | 64 +- 
test/codec_perf/perf.pb.go | 26 +- test/tools/go.mod | 13 +- test/tools/go.sum | 26 +- 56 files changed, 1387 insertions(+), 3978 deletions(-) diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 52f54e6a016c..3f274482c74b 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto @@ -55,11 +55,9 @@ type LoadBalanceRequest struct { func (x *LoadBalanceRequest) Reset() { *x = LoadBalanceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalanceRequest) String() string { @@ -70,7 +68,7 @@ func (*LoadBalanceRequest) ProtoMessage() {} func (x *LoadBalanceRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -139,11 +137,9 @@ type InitialLoadBalanceRequest struct { func (x *InitialLoadBalanceRequest) Reset() { *x = InitialLoadBalanceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InitialLoadBalanceRequest) String() string { @@ -154,7 +150,7 @@ func (*InitialLoadBalanceRequest) ProtoMessage() {} func (x *InitialLoadBalanceRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -190,11 +186,9 @@ type ClientStatsPerToken struct { func (x *ClientStatsPerToken) Reset() { *x = ClientStatsPerToken{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientStatsPerToken) String() string { @@ -205,7 +199,7 @@ func (*ClientStatsPerToken) ProtoMessage() {} func (x *ClientStatsPerToken) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -258,11 +252,9 @@ type ClientStats struct { func (x *ClientStats) Reset() { *x = ClientStats{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ClientStats) String() string { @@ -273,7 +265,7 @@ func (*ClientStats) ProtoMessage() {} func (x *ClientStats) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -345,11 +337,9 @@ type LoadBalanceResponse struct { func (x *LoadBalanceResponse) Reset() { *x = LoadBalanceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalanceResponse) String() string { @@ -360,7 +350,7 @@ func (*LoadBalanceResponse) ProtoMessage() {} func (x *LoadBalanceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -438,11 +428,9 @@ type FallbackResponse struct { func (x *FallbackResponse) Reset() { *x = FallbackResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FallbackResponse) String() string { @@ -453,7 +441,7 @@ func (*FallbackResponse) ProtoMessage() {} func (x *FallbackResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -481,11 +469,9 @@ type InitialLoadBalanceResponse struct { func (x *InitialLoadBalanceResponse) Reset() { *x = InitialLoadBalanceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InitialLoadBalanceResponse) String() string { @@ -496,7 +482,7 @@ func (*InitialLoadBalanceResponse) ProtoMessage() {} func (x *InitialLoadBalanceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -532,11 +518,9 @@ type ServerList struct { func (x *ServerList) Reset() { *x = ServerList{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerList) String() string { @@ -547,7 +531,7 @@ func (*ServerList) ProtoMessage() {} func (x *ServerList) ProtoReflect() protoreflect.Message { mi := 
&file_grpc_lb_v1_load_balancer_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -597,11 +581,9 @@ type Server struct { func (x *Server) Reset() { *x = Server{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Server) String() string { @@ -612,7 +594,7 @@ func (*Server) ProtoMessage() {} func (x *Server) ProtoReflect() protoreflect.Message { mi := &file_grpc_lb_v1_load_balancer_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -817,116 +799,6 @@ func file_grpc_lb_v1_load_balancer_proto_init() { if File_grpc_lb_v1_load_balancer_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*InitialLoadBalanceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ClientStatsPerToken); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ClientStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*FallbackResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*InitialLoadBalanceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ServerList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*Server); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{ 
(*LoadBalanceRequest_InitialRequest)(nil), (*LoadBalanceRequest_ClientStats)(nil), diff --git a/binarylog/grpc_binarylog_v1/binarylog.pb.go b/binarylog/grpc_binarylog_v1/binarylog.pb.go index 55bffaa77ef0..9e9d0806995c 100644 --- a/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -274,11 +274,9 @@ type GrpcLogEntry struct { func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcLogEntry) String() string { @@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -440,11 +438,9 @@ type ClientHeader struct { func (x *ClientHeader) Reset() { *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientHeader) String() string { @@ -455,7 +451,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -509,11 +505,9 @@ type ServerHeader struct { func (x *ServerHeader) Reset() { *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHeader) String() string { @@ -524,7 +518,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -565,11 +559,9 @@ type Trailer struct { func (x *Trailer) Reset() { *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Trailer) String() string { @@ -580,7 +572,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -638,11 +630,9 @@ type Message struct { func (x *Message) Reset() { *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Message) String() string { @@ -653,7 +643,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -713,11 +703,9 @@ type Metadata struct { func (x *Metadata) Reset() { *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metadata) String() string { @@ -728,7 +716,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -762,11 +750,9 @@ type MetadataEntry struct { func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataEntry) String() string { @@ -777,7 +763,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -820,11 +806,9 @@ type Address struct { func (x *Address) Reset() { *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -835,7 +819,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1057,104 +1041,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any 
{ - switch v := v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Message); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), diff --git a/channelz/grpc_channelz_v1/channelz.pb.go b/channelz/grpc_channelz_v1/channelz.pb.go index c227d8a58a71..18456d750246 100644 --- a/channelz/grpc_channelz_v1/channelz.pb.go +++ b/channelz/grpc_channelz_v1/channelz.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/channelz/v1/channelz.proto @@ -181,11 +181,9 @@ type Channel struct { func (x *Channel) Reset() { *x = Channel{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Channel) String() string { @@ -196,7 +194,7 @@ func (*Channel) ProtoMessage() {} func (x *Channel) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -272,11 +270,9 @@ type Subchannel struct { func (x *Subchannel) Reset() { *x = Subchannel{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Subchannel) String() string { @@ -287,7 +283,7 @@ func (*Subchannel) ProtoMessage() {} func (x *Subchannel) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -349,11 +345,9 @@ type ChannelConnectivityState struct { func (x *ChannelConnectivityState) Reset() { *x = ChannelConnectivityState{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ChannelConnectivityState) String() string { @@ -364,7 +358,7 @@ func (*ChannelConnectivityState) ProtoMessage() {} func (x *ChannelConnectivityState) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -411,11 +405,9 @@ type ChannelData struct { func (x *ChannelData) Reset() { *x = ChannelData{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ChannelData) String() string { @@ -426,7 +418,7 @@ func (*ChannelData) ProtoMessage() {} func (x *ChannelData) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -517,11 +509,9 @@ type ChannelTraceEvent struct { func (x *ChannelTraceEvent) Reset() { *x = ChannelTraceEvent{} - if protoimpl.UnsafeEnabled { - mi := 
&file_grpc_channelz_v1_channelz_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ChannelTraceEvent) String() string { @@ -532,7 +522,7 @@ func (*ChannelTraceEvent) ProtoMessage() {} func (x *ChannelTraceEvent) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -623,11 +613,9 @@ type ChannelTrace struct { func (x *ChannelTrace) Reset() { *x = ChannelTrace{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ChannelTrace) String() string { @@ -638,7 +626,7 @@ func (*ChannelTrace) ProtoMessage() {} func (x *ChannelTrace) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -688,11 +676,9 @@ type ChannelRef struct { func (x *ChannelRef) Reset() { *x = ChannelRef{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ChannelRef) String() string { @@ -703,7 +689,7 @@ func (*ChannelRef) ProtoMessage() {} func (x *ChannelRef) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -746,11 +732,9 @@ type SubchannelRef struct { func (x *SubchannelRef) Reset() { *x = SubchannelRef{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SubchannelRef) String() string { @@ -761,7 +745,7 @@ func (*SubchannelRef) ProtoMessage() {} func (x *SubchannelRef) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -804,11 +788,9 @@ type SocketRef struct { func (x *SocketRef) Reset() { *x = SocketRef{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SocketRef) String() string { @@ 
-819,7 +801,7 @@ func (*SocketRef) ProtoMessage() {} func (x *SocketRef) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -862,11 +844,9 @@ type ServerRef struct { func (x *ServerRef) Reset() { *x = ServerRef{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerRef) String() string { @@ -877,7 +857,7 @@ func (*ServerRef) ProtoMessage() {} func (x *ServerRef) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -924,11 +904,9 @@ type Server struct { func (x *Server) Reset() { *x = Server{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Server) String() string { @@ -939,7 +917,7 @@ func (*Server) ProtoMessage() {} func (x *Server) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -995,11 +973,9 @@ type ServerData struct { func (x *ServerData) Reset() { *x = ServerData{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerData) String() string { @@ -1010,7 +986,7 @@ func (*ServerData) ProtoMessage() {} func (x *ServerData) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1084,11 +1060,9 @@ type Socket struct { func (x *Socket) Reset() { *x = Socket{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Socket) String() string { @@ -1099,7 +1073,7 @@ func (*Socket) ProtoMessage() {} func (x *Socket) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1206,11 +1180,9 @@ type SocketData struct { func (x *SocketData) 
Reset() { *x = SocketData{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SocketData) String() string { @@ -1221,7 +1193,7 @@ func (*SocketData) ProtoMessage() {} func (x *SocketData) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1343,11 +1315,9 @@ type Address struct { func (x *Address) Reset() { *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -1358,7 +1328,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1438,11 +1408,9 @@ type Security struct { func (x *Security) Reset() { *x = Security{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Security) String() string { @@ -1453,7 +1421,7 @@ func (*Security) ProtoMessage() {} func (x *Security) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1525,11 +1493,9 @@ type SocketOption struct { func (x *SocketOption) Reset() { *x = SocketOption{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SocketOption) String() string { @@ -1540,7 +1506,7 @@ func (*SocketOption) ProtoMessage() {} func (x *SocketOption) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1588,11 +1554,9 @@ type SocketOptionTimeout struct { func (x *SocketOptionTimeout) Reset() { *x = SocketOptionTimeout{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *SocketOptionTimeout) String() string { @@ -1603,7 +1567,7 @@ func (*SocketOptionTimeout) ProtoMessage() {} func (x *SocketOptionTimeout) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1640,11 +1604,9 @@ type SocketOptionLinger struct { func (x *SocketOptionLinger) Reset() { *x = SocketOptionLinger{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SocketOptionLinger) String() string { @@ -1655,7 +1617,7 @@ func (*SocketOptionLinger) ProtoMessage() {} func (x *SocketOptionLinger) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1724,11 +1686,9 @@ type SocketOptionTcpInfo struct { func (x *SocketOptionTcpInfo) Reset() { *x = SocketOptionTcpInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SocketOptionTcpInfo) String() string { @@ -1739,7 +1699,7 @@ func (*SocketOptionTcpInfo) ProtoMessage() {} func (x *SocketOptionTcpInfo) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1976,11 +1936,9 @@ type GetTopChannelsRequest struct { func (x *GetTopChannelsRequest) Reset() { *x = GetTopChannelsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetTopChannelsRequest) String() string { @@ -1991,7 +1949,7 @@ func (*GetTopChannelsRequest) ProtoMessage() {} func (x *GetTopChannelsRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2037,11 +1995,9 @@ type GetTopChannelsResponse struct { func (x *GetTopChannelsResponse) Reset() { *x = GetTopChannelsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetTopChannelsResponse) String() string { @@ -2052,7 +2008,7 @@ func 
(*GetTopChannelsResponse) ProtoMessage() {} func (x *GetTopChannelsResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2100,11 +2056,9 @@ type GetServersRequest struct { func (x *GetServersRequest) Reset() { *x = GetServersRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetServersRequest) String() string { @@ -2115,7 +2069,7 @@ func (*GetServersRequest) ProtoMessage() {} func (x *GetServersRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2161,11 +2115,9 @@ type GetServersResponse struct { func (x *GetServersResponse) Reset() { *x = GetServersResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetServersResponse) String() string { @@ -2176,7 +2128,7 @@ func (*GetServersResponse) ProtoMessage() {} func (x *GetServersResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2216,11 +2168,9 @@ type GetServerRequest struct { func (x *GetServerRequest) Reset() { *x = GetServerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetServerRequest) String() string { @@ -2231,7 +2181,7 @@ func (*GetServerRequest) ProtoMessage() {} func (x *GetServerRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2265,11 +2215,9 @@ type GetServerResponse struct { func (x *GetServerResponse) Reset() { *x = GetServerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetServerResponse) String() string { @@ -2280,7 +2228,7 @@ func (*GetServerResponse) ProtoMessage() {} func (x *GetServerResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[25] - 
if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2322,11 +2270,9 @@ type GetServerSocketsRequest struct { func (x *GetServerSocketsRequest) Reset() { *x = GetServerSocketsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetServerSocketsRequest) String() string { @@ -2337,7 +2283,7 @@ func (*GetServerSocketsRequest) ProtoMessage() {} func (x *GetServerSocketsRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2390,11 +2336,9 @@ type GetServerSocketsResponse struct { func (x *GetServerSocketsResponse) Reset() { *x = GetServerSocketsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetServerSocketsResponse) String() string { @@ -2405,7 +2349,7 @@ func (*GetServerSocketsResponse) ProtoMessage() {} func (x *GetServerSocketsResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2445,11 +2389,9 @@ type GetChannelRequest struct { func (x *GetChannelRequest) Reset() { *x = GetChannelRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetChannelRequest) String() string { @@ -2460,7 +2402,7 @@ func (*GetChannelRequest) ProtoMessage() {} func (x *GetChannelRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2494,11 +2436,9 @@ type GetChannelResponse struct { func (x *GetChannelResponse) Reset() { *x = GetChannelResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetChannelResponse) String() string { @@ -2509,7 +2449,7 @@ func (*GetChannelResponse) ProtoMessage() {} func (x *GetChannelResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2542,11 +2482,9 @@ type GetSubchannelRequest struct { func (x *GetSubchannelRequest) Reset() { *x = GetSubchannelRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetSubchannelRequest) String() string { @@ -2557,7 +2495,7 @@ func (*GetSubchannelRequest) ProtoMessage() {} func (x *GetSubchannelRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2591,11 +2529,9 @@ type GetSubchannelResponse struct { func (x *GetSubchannelResponse) Reset() { *x = GetSubchannelResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetSubchannelResponse) String() string { @@ -2606,7 +2542,7 @@ func (*GetSubchannelResponse) ProtoMessage() {} func (x *GetSubchannelResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2643,11 +2579,9 @@ type GetSocketRequest struct { func (x *GetSocketRequest) Reset() { *x = GetSocketRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetSocketRequest) String() string { @@ -2658,7 +2592,7 @@ func (*GetSocketRequest) ProtoMessage() {} func (x *GetSocketRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2699,11 +2633,9 @@ type GetSocketResponse struct { func (x *GetSocketResponse) Reset() { *x = GetSocketResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetSocketResponse) String() string { @@ -2714,7 +2646,7 @@ func (*GetSocketResponse) ProtoMessage() {} func (x *GetSocketResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2750,11 +2682,9 @@ type 
Address_TcpIpAddress struct { func (x *Address_TcpIpAddress) Reset() { *x = Address_TcpIpAddress{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address_TcpIpAddress) String() string { @@ -2765,7 +2695,7 @@ func (*Address_TcpIpAddress) ProtoMessage() {} func (x *Address_TcpIpAddress) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2805,11 +2735,9 @@ type Address_UdsAddress struct { func (x *Address_UdsAddress) Reset() { *x = Address_UdsAddress{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address_UdsAddress) String() string { @@ -2820,7 +2748,7 @@ func (*Address_UdsAddress) ProtoMessage() {} func (x *Address_UdsAddress) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2856,11 +2784,9 @@ type Address_OtherAddress struct { func (x *Address_OtherAddress) Reset() { *x = Address_OtherAddress{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address_OtherAddress) String() string { @@ -2871,7 +2797,7 @@ func (*Address_OtherAddress) ProtoMessage() {} func (x *Address_OtherAddress) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2918,11 +2844,9 @@ type Security_Tls struct { func (x *Security_Tls) Reset() { *x = Security_Tls{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Security_Tls) String() string { @@ -2933,7 +2857,7 @@ func (*Security_Tls) ProtoMessage() {} func (x *Security_Tls) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3016,11 +2940,9 @@ type Security_OtherSecurity struct { func (x *Security_OtherSecurity) Reset() { *x = Security_OtherSecurity{} - if protoimpl.UnsafeEnabled { - mi := 
&file_grpc_channelz_v1_channelz_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Security_OtherSecurity) String() string { @@ -3031,7 +2953,7 @@ func (*Security_OtherSecurity) ProtoMessage() {} func (x *Security_OtherSecurity) ProtoReflect() protoreflect.Message { mi := &file_grpc_channelz_v1_channelz_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3717,476 +3639,6 @@ func file_grpc_channelz_v1_channelz_proto_init() { if File_grpc_channelz_v1_channelz_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_channelz_v1_channelz_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Channel); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Subchannel); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ChannelConnectivityState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ChannelData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ChannelTraceEvent); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ChannelTrace); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ChannelRef); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*SubchannelRef); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*SocketRef); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*ServerRef); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*Server); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ServerData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*Socket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*SocketData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*Security); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*SocketOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*SocketOptionTimeout); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*SocketOptionLinger); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*SocketOptionTcpInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*GetTopChannelsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*GetTopChannelsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GetServersRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*GetServersResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*GetServerRequest); i { - case 0: - 
return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*GetServerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*GetServerSocketsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*GetServerSocketsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*GetChannelRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*GetChannelResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*GetSubchannelRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*GetSubchannelResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GetSocketRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[33].Exporter = func(v any, i int) any { - switch v := v.(*GetSocketResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*Address_TcpIpAddress); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[35].Exporter = func(v any, i int) any { - switch v := v.(*Address_UdsAddress); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[36].Exporter = func(v any, i int) any { - switch v := v.(*Address_OtherAddress); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*Security_Tls); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_channelz_v1_channelz_proto_msgTypes[38].Exporter = 
func(v any, i int) any { - switch v := v.(*Security_OtherSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_channelz_v1_channelz_proto_msgTypes[4].OneofWrappers = []any{ (*ChannelTraceEvent_ChannelRef)(nil), (*ChannelTraceEvent_SubchannelRef)(nil), diff --git a/cmd/protoc-gen-go-grpc/go.mod b/cmd/protoc-gen-go-grpc/go.mod index 811feafbea7d..00b37ce12e68 100644 --- a/cmd/protoc-gen-go-grpc/go.mod +++ b/cmd/protoc-gen-go-grpc/go.mod @@ -4,7 +4,7 @@ go 1.22.7 require ( google.golang.org/grpc v1.65.0 - google.golang.org/protobuf v1.34.2 + google.golang.org/protobuf v1.35.1 ) require ( diff --git a/cmd/protoc-gen-go-grpc/go.sum b/cmd/protoc-gen-go-grpc/go.sum index f8aa00fb421a..5e87ede1d588 100644 --- a/cmd/protoc-gen-go-grpc/go.sum +++ b/cmd/protoc-gen-go-grpc/go.sum @@ -10,5 +10,5 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index b7de8f05b763..83d23f65aa54 100644 --- a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/gcp/altscontext.proto @@ -60,11 +60,9 @@ type AltsContext struct { func (x *AltsContext) Reset() { *x = AltsContext{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AltsContext) String() string { @@ -75,7 +73,7 @@ func (*AltsContext) ProtoMessage() {} func (x *AltsContext) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_altscontext_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -224,20 +222,6 @@ func file_grpc_gcp_altscontext_proto_init() { return } file_grpc_gcp_transport_security_common_proto_init() - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*AltsContext); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 79b5dad476c7..915b36df8214 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/gcp/handshaker.proto @@ -154,11 +154,9 @@ type Endpoint struct { func (x *Endpoint) Reset() { *x = Endpoint{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Endpoint) String() string { @@ -169,7 +167,7 @@ func (*Endpoint) ProtoMessage() {} func (x *Endpoint) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -221,11 +219,9 @@ type Identity struct { func (x *Identity) Reset() { *x = Identity{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Identity) String() string { @@ -236,7 +232,7 @@ func (*Identity) ProtoMessage() {} func (x *Identity) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -340,11 +336,9 @@ type StartClientHandshakeReq struct { func (x *StartClientHandshakeReq) Reset() { *x = StartClientHandshakeReq{} - if 
protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StartClientHandshakeReq) String() string { @@ -355,7 +349,7 @@ func (*StartClientHandshakeReq) ProtoMessage() {} func (x *StartClientHandshakeReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -467,11 +461,9 @@ type ServerHandshakeParameters struct { func (x *ServerHandshakeParameters) Reset() { *x = ServerHandshakeParameters{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHandshakeParameters) String() string { @@ -482,7 +474,7 @@ func (*ServerHandshakeParameters) ProtoMessage() {} func (x *ServerHandshakeParameters) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -549,11 +541,9 @@ type StartServerHandshakeReq struct { func (x *StartServerHandshakeReq) Reset() { *x = StartServerHandshakeReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StartServerHandshakeReq) String() string { @@ -564,7 +554,7 @@ func (*StartServerHandshakeReq) ProtoMessage() {} func (x *StartServerHandshakeReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -645,11 +635,9 @@ type NextHandshakeMessageReq struct { func (x *NextHandshakeMessageReq) Reset() { *x = NextHandshakeMessageReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NextHandshakeMessageReq) String() string { @@ -660,7 +648,7 @@ func (*NextHandshakeMessageReq) ProtoMessage() {} func (x *NextHandshakeMessageReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -704,11 +692,9 @@ type HandshakerReq struct { func (x *HandshakerReq) Reset() { *x = HandshakerReq{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HandshakerReq) String() string { @@ -719,7 +705,7 @@ func (*HandshakerReq) ProtoMessage() {} func (x *HandshakerReq) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -816,11 +802,9 @@ type HandshakerResult struct { func (x *HandshakerResult) Reset() { *x = HandshakerResult{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HandshakerResult) String() string { @@ -831,7 +815,7 @@ func (*HandshakerResult) ProtoMessage() {} func (x *HandshakerResult) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -915,11 +899,9 @@ type HandshakerStatus struct { func (x *HandshakerStatus) Reset() { *x = HandshakerStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HandshakerStatus) String() string { @@ -930,7 +912,7 @@ func (*HandshakerStatus) ProtoMessage() {} func (x *HandshakerStatus) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -983,11 +965,9 @@ type HandshakerResp struct { func (x *HandshakerResp) Reset() { *x = HandshakerResp{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HandshakerResp) String() string { @@ -998,7 +978,7 @@ func (*HandshakerResp) ProtoMessage() {} func (x *HandshakerResp) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_handshaker_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1313,128 +1293,6 @@ func file_grpc_gcp_handshaker_proto_init() { return } file_grpc_gcp_transport_security_common_proto_init() - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Endpoint); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Identity); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*StartClientHandshakeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ServerHandshakeParameters); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*StartServerHandshakeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*NextHandshakeMessageReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*HandshakerReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*HandshakerResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*HandshakerStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*HandshakerResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{ (*Identity_ServiceAccount)(nil), (*Identity_Hostname)(nil), diff --git a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 6956c14f6a98..e9676db4b52a 100644 --- a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/gcp/transport_security_common.proto @@ -102,11 +102,9 @@ type RpcProtocolVersions struct { func (x *RpcProtocolVersions) Reset() { *x = RpcProtocolVersions{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RpcProtocolVersions) String() string { @@ -117,7 +115,7 @@ func (*RpcProtocolVersions) ProtoMessage() {} func (x *RpcProtocolVersions) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -158,11 +156,9 @@ type RpcProtocolVersions_Version struct { func (x *RpcProtocolVersions_Version) Reset() { *x = RpcProtocolVersions_Version{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RpcProtocolVersions_Version) String() string { @@ -173,7 +169,7 @@ func (*RpcProtocolVersions_Version) ProtoMessage() {} func (x *RpcProtocolVersions_Version) ProtoReflect() protoreflect.Message { mi := &file_grpc_gcp_transport_security_common_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -273,32 +269,6 @@ func file_grpc_gcp_transport_security_common_proto_init() { if File_grpc_gcp_transport_security_common_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*RpcProtocolVersions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*RpcProtocolVersions_Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/examples/features/proto/echo/echo.pb.go b/examples/features/proto/echo/echo.pb.go index 243c47d626c7..6b23482b348b 100644 --- a/examples/features/proto/echo/echo.pb.go +++ b/examples/features/proto/echo/echo.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: examples/features/proto/echo/echo.proto @@ -48,11 +48,9 @@ type EchoRequest struct { func (x *EchoRequest) Reset() { *x = EchoRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_examples_features_proto_echo_echo_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_examples_features_proto_echo_echo_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EchoRequest) String() string { @@ -63,7 +61,7 @@ func (*EchoRequest) ProtoMessage() {} func (x *EchoRequest) ProtoReflect() protoreflect.Message { mi := &file_examples_features_proto_echo_echo_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -96,11 +94,9 @@ type EchoResponse struct { func (x *EchoResponse) Reset() { *x = EchoResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_examples_features_proto_echo_echo_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_examples_features_proto_echo_echo_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EchoResponse) String() string { @@ -111,7 +107,7 @@ func (*EchoResponse) ProtoMessage() {} func (x *EchoResponse) ProtoReflect() protoreflect.Message { mi := &file_examples_features_proto_echo_echo_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -213,32 +209,6 @@ func file_examples_features_proto_echo_echo_proto_init() { if File_examples_features_proto_echo_echo_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_examples_features_proto_echo_echo_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*EchoRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_examples_features_proto_echo_echo_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*EchoResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/examples/go.mod b/examples/go.mod index e22de4edf7e8..0dd53bee1cc1 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -4,47 +4,47 @@ go 1.22.7 require ( github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 - github.com/prometheus/client_golang v1.20.3 - go.opentelemetry.io/otel/exporters/prometheus v0.52.0 - go.opentelemetry.io/otel/sdk/metric v1.30.0 + github.com/prometheus/client_golang v1.20.5 + go.opentelemetry.io/otel/exporters/prometheus v0.53.0 + go.opentelemetry.io/otel/sdk/metric v1.31.0 golang.org/x/oauth2 v0.23.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 - google.golang.org/grpc v1.66.2 + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 + google.golang.org/grpc v1.67.1 google.golang.org/grpc/gcp/observability v1.0.1 google.golang.org/grpc/security/advancedtls v1.0.0 - google.golang.org/grpc/stats/opentelemetry v0.0.0-20240912061038-b6fde8cdd1c0 - google.golang.org/protobuf v1.34.2 + 
google.golang.org/grpc/stats/opentelemetry v0.0.0-20241017035653-830135e6c5a3 + google.golang.org/protobuf v1.35.1 ) require ( - cel.dev/expr v0.16.1 // indirect - cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/auth v0.9.4 // indirect + cel.dev/expr v0.16.2 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.9.8 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/logging v1.11.0 // indirect - cloud.google.com/go/longrunning v0.6.0 // indirect - cloud.google.com/go/monitoring v1.21.0 // indirect - cloud.google.com/go/trace v1.11.0 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/logging v1.12.0 // indirect + cloud.google.com/go/longrunning v0.6.1 // indirect + cloud.google.com/go/monitoring v1.21.1 // indirect + cloud.google.com/go/trace v1.11.1 // indirect contrib.go.opencensus.io/exporter/stackdriver v0.13.15-0.20230702191903-2de6d2748484 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect - github.com/aws/aws-sdk-go-v2 v1.30.5 // indirect - github.com/aws/aws-sdk-go-v2/config v1.27.33 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.32 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.28.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.41 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect - github.com/aws/smithy-go v1.20.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect + github.com/aws/smithy-go v1.22.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -55,29 +55,29 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect - 
github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.59.1 // indirect + github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.30.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/otel v1.30.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect - go.opentelemetry.io/otel/sdk v1.30.0 // indirect - go.opentelemetry.io/otel/trace v1.30.0 // indirect - golang.org/x/crypto v0.27.0 // indirect - golang.org/x/net v0.29.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.31.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/net v0.30.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - golang.org/x/time v0.6.0 // indirect - google.golang.org/api v0.197.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + google.golang.org/api v0.201.0 // indirect + google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/grpc/stats/opencensus v1.0.0 // indirect ) diff --git a/examples/go.sum b/examples/go.sum index c851debc3bb3..182b57b230cc 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -1,6 +1,6 @@ cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= -cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cel.dev/expr v0.16.2 h1:RwRhoH17VhAu9U5CMvMhH1PDVgf0tuz9FT+24AfMLfU= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -41,8 +41,8 @@ cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMz cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go 
v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= @@ -119,8 +119,8 @@ cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEar cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= -cloud.google.com/go/auth v0.9.4 h1:DxF7imbEbiFu9+zdKC6cKBko1e8XeJnipNqIbWZ+kDI= -cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= +cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8= +cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= @@ -220,8 +220,8 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -402,8 +402,8 @@ cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCta cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= -cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod 
h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -441,16 +441,16 @@ cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= -cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= +cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= +cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= -cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= -cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -482,8 +482,8 @@ cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuu cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= -cloud.google.com/go/monitoring v1.21.0 h1:EMc0tB+d3lUewT2NzKC/hr8cSR9WsUieVywzIHetGro= -cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -708,8 +708,8 @@ cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1r cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= -cloud.google.com/go/trace v1.11.0 h1:UHX6cOJm45Zw/KIbqHe4kII8PupLt/V5tscZUkeiJVI= -cloud.google.com/go/trace 
v1.11.0/go.mod h1:Aiemdi52635dBR7o3zuc9lLjXo3BwGaChEjCa3tJNmM= +cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= +cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= @@ -776,8 +776,8 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 h1:cb3br57K508pQEFgBxn9GDhPS9HefpyMPK1RzmtMNzk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= @@ -789,32 +789,32 @@ github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0I github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= -github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= -github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU= -github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= +github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= +github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod 
h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= +github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o= -github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= -github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 
h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= +github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bazelbuild/rules_go v0.49.0/go.mod h1:Dhcz716Kqg1RHNWos+N6MlXNkjNP2EwZQ0LukRKJfMs= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -852,8 +852,8 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -1020,8 +1020,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -1060,16 +1060,16 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgm github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= -github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1117,24 +1117,24 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/detectors/gcp v1.30.0 h1:GF+YVnUeJwOy+Ag2cTEpVZq+r2Tnci42FIiNwA2gjME= -go.opentelemetry.io/contrib/detectors/gcp v1.30.0/go.mod h1:p5Av42vWKPezk67MQwLYZwlo/z6xLnN/upaIyQNWBGg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 h1:hCq2hNMwsegUvPzI7sPOvtO9cqyy5GbWt/Ybp2xrx8Q= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ= -go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= -go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= -go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= -go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= -go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA= +go.opentelemetry.io/contrib/detectors/gcp 
v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/exporters/prometheus v0.53.0 h1:QXobPHrwiGLM4ufrY3EOmDPJpo2P90UuFau4CDPJA/I= +go.opentelemetry.io/otel/exporters/prometheus v0.53.0/go.mod h1:WOAXGr3D00CfzmFxtTV1eR0GpoHuPEu+HJT8UWW2SIU= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1162,8 +1162,9 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1291,8 +1292,9 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod 
h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1431,9 +1433,11 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1452,7 +1456,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1473,16 +1478,17 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1628,8 +1634,8 @@ google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZ google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= -google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= +google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0= +google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1775,8 +1781,8 @@ google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mR google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -1787,8 +1793,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -1804,9 +1810,9 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/grpc/gcp/observability v1.0.1 h1:2IQ7szW1gobfZaS/sDSAu2uxO0V/aTryMZvlcyqKqQA= @@ -1836,8 +1842,9 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= 
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/examples/helloworld/helloworld/helloworld.pb.go b/examples/helloworld/helloworld/helloworld.pb.go index efe2bbe9d208..b8351f257707 100644 --- a/examples/helloworld/helloworld/helloworld.pb.go +++ b/examples/helloworld/helloworld/helloworld.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: examples/helloworld/helloworld/helloworld.proto @@ -45,11 +45,9 @@ type HelloRequest struct { func (x *HelloRequest) Reset() { *x = HelloRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_examples_helloworld_helloworld_helloworld_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_examples_helloworld_helloworld_helloworld_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HelloRequest) String() string { @@ -60,7 +58,7 @@ func (*HelloRequest) ProtoMessage() {} func (x *HelloRequest) ProtoReflect() protoreflect.Message { mi := &file_examples_helloworld_helloworld_helloworld_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -93,11 +91,9 @@ type HelloReply struct { func (x *HelloReply) Reset() { *x = HelloReply{} - if protoimpl.UnsafeEnabled { - mi := &file_examples_helloworld_helloworld_helloworld_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_examples_helloworld_helloworld_helloworld_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HelloReply) String() string { @@ -108,7 +104,7 @@ func (*HelloReply) ProtoMessage() {} func (x *HelloReply) ProtoReflect() protoreflect.Message { mi := &file_examples_helloworld_helloworld_helloworld_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -188,32 +184,6 @@ func file_examples_helloworld_helloworld_helloworld_proto_init() { if File_examples_helloworld_helloworld_helloworld_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_examples_helloworld_helloworld_helloworld_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*HelloRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_examples_helloworld_helloworld_helloworld_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*HelloReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/examples/route_guide/routeguide/route_guide.pb.go b/examples/route_guide/routeguide/route_guide.pb.go index eb5f2436d61d..5a8babf15aaa 100644 --- 
a/examples/route_guide/routeguide/route_guide.pb.go +++ b/examples/route_guide/routeguide/route_guide.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: examples/route_guide/routeguide/route_guide.proto @@ -49,11 +49,9 @@ type Point struct { func (x *Point) Reset() { *x = Point{} - if protoimpl.UnsafeEnabled { - mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Point) String() string { @@ -64,7 +62,7 @@ func (*Point) ProtoMessage() {} func (x *Point) ProtoReflect() protoreflect.Message { mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -108,11 +106,9 @@ type Rectangle struct { func (x *Rectangle) Reset() { *x = Rectangle{} - if protoimpl.UnsafeEnabled { - mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Rectangle) String() string { @@ -123,7 +119,7 @@ func (*Rectangle) ProtoMessage() {} func (x *Rectangle) ProtoReflect() protoreflect.Message { mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -168,11 +164,9 @@ type Feature struct { func (x *Feature) Reset() { *x = Feature{} - if protoimpl.UnsafeEnabled { - mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Feature) String() string { @@ -183,7 +177,7 @@ func (*Feature) ProtoMessage() {} func (x *Feature) ProtoReflect() protoreflect.Message { mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -226,11 +220,9 @@ type RouteNote struct { func (x *RouteNote) Reset() { *x = RouteNote{} - if protoimpl.UnsafeEnabled { - mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RouteNote) String() string { @@ -241,7 +233,7 @@ func (*RouteNote) ProtoMessage() {} func (x *RouteNote) ProtoReflect() protoreflect.Message { mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -292,11 +284,9 @@ type RouteSummary struct { func (x *RouteSummary) Reset() { *x = RouteSummary{} - if protoimpl.UnsafeEnabled { - mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RouteSummary) String() string { @@ -307,7 +297,7 @@ func (*RouteSummary) ProtoMessage() {} func (x *RouteSummary) ProtoReflect() protoreflect.Message { mi := &file_examples_route_guide_routeguide_route_guide_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -458,68 +448,6 @@ func file_examples_route_guide_routeguide_route_guide_proto_init() { if File_examples_route_guide_routeguide_route_guide_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_examples_route_guide_routeguide_route_guide_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Point); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_examples_route_guide_routeguide_route_guide_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Rectangle); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_examples_route_guide_routeguide_route_guide_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Feature); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_examples_route_guide_routeguide_route_guide_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*RouteNote); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_examples_route_guide_routeguide_route_guide_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*RouteSummary); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index 71877d05a6ab..cd9fdb54e229 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -3,38 +3,38 @@ module google.golang.org/grpc/gcp/observability go 1.22.7 require ( - cloud.google.com/go/logging v1.11.0 + cloud.google.com/go/logging v1.12.0 contrib.go.opencensus.io/exporter/stackdriver v0.13.15-0.20230702191903-2de6d2748484 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 go.opencensus.io v0.24.0 golang.org/x/oauth2 v0.23.0 - google.golang.org/api v0.197.0 - google.golang.org/grpc v1.66.2 + google.golang.org/api v0.201.0 + google.golang.org/grpc v1.67.1 google.golang.org/grpc/stats/opencensus v1.0.0 ) require ( - cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/auth v0.9.4 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.9.8 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - 
cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/longrunning v0.6.0 // indirect - cloud.google.com/go/monitoring v1.21.0 // indirect - cloud.google.com/go/trace v1.11.0 // indirect - github.com/aws/aws-sdk-go-v2 v1.30.5 // indirect - github.com/aws/aws-sdk-go-v2/config v1.27.33 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.32 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/longrunning v0.6.1 // indirect + cloud.google.com/go/monitoring v1.21.1 // indirect + cloud.google.com/go/trace v1.11.1 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.28.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.41 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect - github.com/aws/smithy-go v1.20.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect + github.com/aws/smithy-go v1.22.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -44,21 +44,21 @@ require ( github.com/google/s2a-go v0.1.8 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/otel v1.30.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect - go.opentelemetry.io/otel/trace v1.30.0 // indirect - golang.org/x/crypto v0.27.0 // indirect - golang.org/x/net v0.29.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/net v0.30.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - golang.org/x/time v0.6.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api 
v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/protobuf v1.35.1 // indirect ) replace google.golang.org/grpc => ../.. diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index 6e9fe5ba37a6..a328c2053c70 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -1,5 +1,5 @@ cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -40,8 +40,8 @@ cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMz cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= @@ -118,8 +118,8 @@ cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEar cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= -cloud.google.com/go/auth v0.9.4 h1:DxF7imbEbiFu9+zdKC6cKBko1e8XeJnipNqIbWZ+kDI= -cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= +cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8= +cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= @@ -219,8 +219,8 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod 
h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -401,8 +401,8 @@ cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCta cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= -cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -440,16 +440,16 @@ cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= -cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= +cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= +cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= -cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= -cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= cloud.google.com/go/managedidentities v1.3.0/go.mod 
h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -481,8 +481,8 @@ cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuu cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= -cloud.google.com/go/monitoring v1.21.0 h1:EMc0tB+d3lUewT2NzKC/hr8cSR9WsUieVywzIHetGro= -cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -707,8 +707,8 @@ cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1r cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= -cloud.google.com/go/trace v1.11.0 h1:UHX6cOJm45Zw/KIbqHe4kII8PupLt/V5tscZUkeiJVI= -cloud.google.com/go/trace v1.11.0/go.mod h1:Aiemdi52635dBR7o3zuc9lLjXo3BwGaChEjCa3tJNmM= +cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= +cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= @@ -786,32 +786,32 @@ github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0I github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= -github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= -github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU= -github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 
h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= +github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= +github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= +github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o= -github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= -github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod 
h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= +github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bazelbuild/rules_go v0.49.0/go.mod h1:Dhcz716Kqg1RHNWos+N6MlXNkjNP2EwZQ0LukRKJfMs= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= @@ -845,7 +845,7 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -1093,18 +1093,18 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 h1:hCq2hNMwsegUvPzI7sPOvtO9cqyy5GbWt/Ybp2xrx8Q= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= 
-go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1132,8 +1132,9 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1261,8 +1262,9 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1401,9 +1403,11 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1422,7 +1426,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1443,16 +1448,17 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1598,8 +1604,8 @@ google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZ google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= -google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= +google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0= +google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1745,8 +1751,8 @@ google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mR google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -1757,8 +1763,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -1774,9 +1780,9 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/grpc/stats/opencensus v1.0.0 h1:evSYcRZaSToQp+borzWE52+03joezZeXcKJvZDfkUJA= @@ -1802,8 +1808,9 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= 
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/go.mod b/go.mod index 16167bada986..d87907da3855 100644 --- a/go.mod +++ b/go.mod @@ -5,24 +5,24 @@ go 1.22.7 require ( github.com/cespare/xxhash/v2 v2.3.0 github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 - github.com/envoyproxy/go-control-plane v0.13.0 + github.com/envoyproxy/go-control-plane v0.13.1 github.com/golang/glog v1.2.2 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 - golang.org/x/net v0.29.0 + golang.org/x/net v0.30.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.25.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 - google.golang.org/protobuf v1.34.2 + golang.org/x/sys v0.26.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 + google.golang.org/protobuf v1.35.1 ) require ( - cel.dev/expr v0.16.1 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect + cel.dev/expr v0.16.2 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - golang.org/x/text v0.18.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + golang.org/x/text v0.19.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect ) diff --git a/go.sum b/go.sum index 08368e1bffa8..c62416214d25 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ -cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= -cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cel.dev/expr v0.16.2 h1:RwRhoH17VhAu9U5CMvMhH1PDVgf0tuz9FT+24AfMLfU= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -10,8 +10,8 @@ github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8E github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod 
h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= @@ -28,21 +28,21 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/health/grpc_health_v1/health.pb.go b/health/grpc_health_v1/health.pb.go index d92335445f65..26e16d91924f 100644 --- a/health/grpc_health_v1/health.pb.go +++ b/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -99,11 +99,9 @@ type HealthCheckRequest struct { func (x *HealthCheckRequest) Reset() { *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckRequest) String() string { @@ -114,7 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {} func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -146,11 +144,9 @@ type HealthCheckResponse struct { func (x *HealthCheckResponse) Reset() { *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckResponse) String() string { @@ -161,7 +157,7 @@ func (*HealthCheckResponse) ProtoMessage() {} func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -260,32 +256,6 @@ func file_grpc_health_v1_health_proto_init() { if File_grpc_health_v1_health_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/internal/proto/grpc_lookup_v1/rls.pb.go b/internal/proto/grpc_lookup_v1/rls.pb.go index 703091047b4b..14185ca35a0c 100644 --- a/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/lookup/v1/rls.proto @@ -105,11 +105,9 @@ type RouteLookupRequest struct { func (x *RouteLookupRequest) Reset() { *x = RouteLookupRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RouteLookupRequest) String() string { @@ -120,7 +118,7 @@ func (*RouteLookupRequest) ProtoMessage() {} func (x *RouteLookupRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -189,11 +187,9 @@ type RouteLookupResponse struct { func (x *RouteLookupResponse) Reset() { *x = RouteLookupResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RouteLookupResponse) String() string { @@ -204,7 +200,7 @@ func (*RouteLookupResponse) ProtoMessage() {} func (x *RouteLookupResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,32 +335,6 @@ func file_grpc_lookup_v1_rls_proto_init() { if File_grpc_lookup_v1_rls_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*RouteLookupRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*RouteLookupResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/internal/proto/grpc_lookup_v1/rls_config.pb.go b/internal/proto/grpc_lookup_v1/rls_config.pb.go index a0be3c8cb268..1549a7aa13a3 100644 --- a/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/lookup/v1/rls_config.proto @@ -59,11 +59,9 @@ type NameMatcher struct { func (x *NameMatcher) Reset() { *x = NameMatcher{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NameMatcher) String() string { @@ -74,7 +72,7 @@ func (*NameMatcher) ProtoMessage() {} func (x *NameMatcher) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,11 +129,9 @@ type GrpcKeyBuilder struct { func (x *GrpcKeyBuilder) Reset() { *x = GrpcKeyBuilder{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcKeyBuilder) String() string { @@ -146,7 +142,7 @@ func (*GrpcKeyBuilder) ProtoMessage() {} func (x *GrpcKeyBuilder) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -277,11 +273,9 @@ type HttpKeyBuilder struct { func (x *HttpKeyBuilder) Reset() { *x = HttpKeyBuilder{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HttpKeyBuilder) String() string { @@ -292,7 +286,7 @@ func (*HttpKeyBuilder) ProtoMessage() {} func (x *HttpKeyBuilder) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -400,11 +394,9 @@ type RouteLookupConfig struct { func (x *RouteLookupConfig) Reset() { *x = RouteLookupConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RouteLookupConfig) String() string { @@ -415,7 +407,7 @@ func (*RouteLookupConfig) ProtoMessage() {} func (x *RouteLookupConfig) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -506,11 +498,9 @@ type RouteLookupClusterSpecifier struct { func (x *RouteLookupClusterSpecifier) Reset() { *x = RouteLookupClusterSpecifier{} - if 
protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RouteLookupClusterSpecifier) String() string { @@ -521,7 +511,7 @@ func (*RouteLookupClusterSpecifier) ProtoMessage() {} func (x *RouteLookupClusterSpecifier) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -558,11 +548,9 @@ type GrpcKeyBuilder_Name struct { func (x *GrpcKeyBuilder_Name) Reset() { *x = GrpcKeyBuilder_Name{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcKeyBuilder_Name) String() string { @@ -573,7 +561,7 @@ func (*GrpcKeyBuilder_Name) ProtoMessage() {} func (x *GrpcKeyBuilder_Name) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -620,11 +608,9 @@ type GrpcKeyBuilder_ExtraKeys struct { func (x *GrpcKeyBuilder_ExtraKeys) Reset() { *x = GrpcKeyBuilder_ExtraKeys{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcKeyBuilder_ExtraKeys) String() string { @@ -635,7 +621,7 @@ func (*GrpcKeyBuilder_ExtraKeys) ProtoMessage() {} func (x *GrpcKeyBuilder_ExtraKeys) ProtoReflect() protoreflect.Message { mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -842,92 +828,6 @@ func file_grpc_lookup_v1_rls_config_proto_init() { if File_grpc_lookup_v1_rls_config_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*NameMatcher); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*GrpcKeyBuilder); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*HttpKeyBuilder); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*RouteLookupConfig); i { - case 0: - return &v.state 
- case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*RouteLookupClusterSpecifier); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*GrpcKeyBuilder_Name); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*GrpcKeyBuilder_ExtraKeys); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/interop/grpc_testing/benchmark_service.pb.go b/interop/grpc_testing/benchmark_service.pb.go index 3ac1ee37eb69..365bca71c141 100644 --- a/interop/grpc_testing/benchmark_service.pb.go +++ b/interop/grpc_testing/benchmark_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/testing/benchmark_service.proto diff --git a/interop/grpc_testing/control.pb.go b/interop/grpc_testing/control.pb.go index e7ca21dcf217..96f1252a10c5 100644 --- a/interop/grpc_testing/control.pb.go +++ b/interop/grpc_testing/control.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/testing/control.proto @@ -212,11 +212,9 @@ type PoissonParams struct { func (x *PoissonParams) Reset() { *x = PoissonParams{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PoissonParams) String() string { @@ -227,7 +225,7 @@ func (*PoissonParams) ProtoMessage() {} func (x *PoissonParams) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -259,11 +257,9 @@ type ClosedLoopParams struct { func (x *ClosedLoopParams) Reset() { *x = ClosedLoopParams{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClosedLoopParams) String() string { @@ -274,7 +270,7 @@ func (*ClosedLoopParams) ProtoMessage() {} func (x *ClosedLoopParams) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -303,11 +299,9 @@ type LoadParams struct { func (x *LoadParams) Reset() { *x = LoadParams{} - if protoimpl.UnsafeEnabled { - 
mi := &file_grpc_testing_control_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadParams) String() string { @@ -318,7 +312,7 @@ func (*LoadParams) ProtoMessage() {} func (x *LoadParams) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -383,11 +377,9 @@ type SecurityParams struct { func (x *SecurityParams) Reset() { *x = SecurityParams{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityParams) String() string { @@ -398,7 +390,7 @@ func (*SecurityParams) ProtoMessage() {} func (x *SecurityParams) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -449,11 +441,9 @@ type ChannelArg struct { func (x *ChannelArg) Reset() { *x = ChannelArg{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ChannelArg) String() string { @@ -464,7 +454,7 @@ func (*ChannelArg) ProtoMessage() {} func (x *ChannelArg) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -566,11 +556,9 @@ type ClientConfig struct { func (x *ClientConfig) Reset() { *x = ClientConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientConfig) String() string { @@ -581,7 +569,7 @@ func (*ClientConfig) ProtoMessage() {} func (x *ClientConfig) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -739,11 +727,9 @@ type ClientStatus struct { func (x *ClientStatus) Reset() { *x = ClientStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientStatus) String() string { @@ -754,7 +740,7 @@ func (*ClientStatus) ProtoMessage() {} func (x 
*ClientStatus) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -788,11 +774,9 @@ type Mark struct { func (x *Mark) Reset() { *x = Mark{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Mark) String() string { @@ -803,7 +787,7 @@ func (*Mark) ProtoMessage() {} func (x *Mark) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -839,11 +823,9 @@ type ClientArgs struct { func (x *ClientArgs) Reset() { *x = ClientArgs{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientArgs) String() string { @@ -854,7 +836,7 @@ func (*ClientArgs) ProtoMessage() {} func (x *ClientArgs) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -939,11 +921,9 @@ type ServerConfig struct { func (x *ServerConfig) Reset() { *x = ServerConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerConfig) String() string { @@ -954,7 +934,7 @@ func (*ServerConfig) ProtoMessage() {} func (x *ServerConfig) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1067,11 +1047,9 @@ type ServerArgs struct { func (x *ServerArgs) Reset() { *x = ServerArgs{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerArgs) String() string { @@ -1082,7 +1060,7 @@ func (*ServerArgs) ProtoMessage() {} func (x *ServerArgs) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1148,11 +1126,9 @@ type ServerStatus struct { func (x *ServerStatus) Reset() { *x = ServerStatus{} - if protoimpl.UnsafeEnabled { - mi := 
&file_grpc_testing_control_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerStatus) String() string { @@ -1163,7 +1139,7 @@ func (*ServerStatus) ProtoMessage() {} func (x *ServerStatus) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1207,11 +1183,9 @@ type CoreRequest struct { func (x *CoreRequest) Reset() { *x = CoreRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CoreRequest) String() string { @@ -1222,7 +1196,7 @@ func (*CoreRequest) ProtoMessage() {} func (x *CoreRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1248,11 +1222,9 @@ type CoreResponse struct { func (x *CoreResponse) Reset() { *x = CoreResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CoreResponse) String() string { @@ -1263,7 +1235,7 @@ func (*CoreResponse) ProtoMessage() {} func (x *CoreResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1293,11 +1265,9 @@ type Void struct { func (x *Void) Reset() { *x = Void{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Void) String() string { @@ -1308,7 +1278,7 @@ func (*Void) ProtoMessage() {} func (x *Void) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1349,11 +1319,9 @@ type Scenario struct { func (x *Scenario) Reset() { *x = Scenario{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Scenario) String() string { @@ -1364,7 +1332,7 @@ func (*Scenario) ProtoMessage() {} func (x *Scenario) ProtoReflect() protoreflect.Message 
{ mi := &file_grpc_testing_control_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1446,11 +1414,9 @@ type Scenarios struct { func (x *Scenarios) Reset() { *x = Scenarios{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Scenarios) String() string { @@ -1461,7 +1427,7 @@ func (*Scenarios) ProtoMessage() {} func (x *Scenarios) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1531,11 +1497,9 @@ type ScenarioResultSummary struct { func (x *ScenarioResultSummary) Reset() { *x = ScenarioResultSummary{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ScenarioResultSummary) String() string { @@ -1546,7 +1510,7 @@ func (*ScenarioResultSummary) ProtoMessage() {} func (x *ScenarioResultSummary) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1728,11 +1692,9 @@ type ScenarioResult struct { func (x *ScenarioResult) Reset() { *x = ScenarioResult{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_control_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_control_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ScenarioResult) String() string { @@ -1743,7 +1705,7 @@ func (*ScenarioResult) ProtoMessage() {} func (x *ScenarioResult) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_control_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2224,236 +2186,6 @@ func file_grpc_testing_control_proto_init() { } file_grpc_testing_payloads_proto_init() file_grpc_testing_stats_proto_init() - if !protoimpl.UnsafeEnabled { - file_grpc_testing_control_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*PoissonParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ClosedLoopParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*LoadParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*SecurityParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ChannelArg); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ClientConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ClientStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Mark); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ClientArgs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*ServerConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*ServerArgs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ServerStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*CoreRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*CoreResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*Void); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*Scenario); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*Scenarios); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[17].Exporter = func(v 
any, i int) any { - switch v := v.(*ScenarioResultSummary); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_control_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*ScenarioResult); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_testing_control_proto_msgTypes[2].OneofWrappers = []any{ (*LoadParams_ClosedLoop)(nil), (*LoadParams_Poisson)(nil), diff --git a/interop/grpc_testing/core/stats.pb.go b/interop/grpc_testing/core/stats.pb.go index e6ca38ef3597..853ddbb9617e 100644 --- a/interop/grpc_testing/core/stats.pb.go +++ b/interop/grpc_testing/core/stats.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/core/stats.proto @@ -45,11 +45,9 @@ type Bucket struct { func (x *Bucket) Reset() { *x = Bucket{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_core_stats_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_core_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Bucket) String() string { @@ -60,7 +58,7 @@ func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { mi := &file_grpc_core_stats_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -99,11 +97,9 @@ type Histogram struct { func (x *Histogram) Reset() { *x = Histogram{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_core_stats_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_core_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Histogram) String() string { @@ -114,7 +110,7 @@ func (*Histogram) ProtoMessage() {} func (x *Histogram) ProtoReflect() protoreflect.Message { mi := &file_grpc_core_stats_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -151,11 +147,9 @@ type Metric struct { func (x *Metric) Reset() { *x = Metric{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_core_stats_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_core_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metric) String() string { @@ -166,7 +160,7 @@ func (*Metric) ProtoMessage() {} func (x *Metric) ProtoReflect() protoreflect.Message { mi := &file_grpc_core_stats_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -235,11 +229,9 @@ type Stats struct { func (x *Stats) Reset() { *x = Stats{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_core_stats_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_core_stats_proto_msgTypes[3] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Stats) String() string { @@ -250,7 +242,7 @@ func (*Stats) ProtoMessage() {} func (x *Stats) ProtoReflect() protoreflect.Message { mi := &file_grpc_core_stats_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -333,56 +325,6 @@ func file_grpc_core_stats_proto_init() { if File_grpc_core_stats_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_core_stats_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Bucket); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_core_stats_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Histogram); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_core_stats_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*Metric); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_core_stats_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Stats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_core_stats_proto_msgTypes[2].OneofWrappers = []any{ (*Metric_Count)(nil), (*Metric_Histogram)(nil), diff --git a/interop/grpc_testing/empty.pb.go b/interop/grpc_testing/empty.pb.go index 7709f270e498..5af8fd67d0b1 100644 --- a/interop/grpc_testing/empty.pb.go +++ b/interop/grpc_testing/empty.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/testing/empty.proto @@ -49,11 +49,9 @@ type Empty struct { func (x *Empty) Reset() { *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_empty_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Empty) String() string { @@ -64,7 +62,7 @@ func (*Empty) ProtoMessage() {} func (x *Empty) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_empty_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -120,20 +118,6 @@ func file_grpc_testing_empty_proto_init() { if File_grpc_testing_empty_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_testing_empty_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index d3ccd24ff9de..9b44bd03c0ab 100644 --- a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/testing/messages.proto @@ -305,11 +305,9 @@ type BoolValue struct { func (x *BoolValue) Reset() { *x = BoolValue{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BoolValue) String() string { @@ -320,7 +318,7 @@ func (*BoolValue) ProtoMessage() {} func (x *BoolValue) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -356,11 +354,9 @@ type Payload struct { func (x *Payload) Reset() { *x = Payload{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Payload) String() string { @@ -371,7 +367,7 @@ func (*Payload) ProtoMessage() {} func (x *Payload) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -413,11 +409,9 @@ type EchoStatus struct { func (x *EchoStatus) Reset() { *x = EchoStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EchoStatus) String() string { @@ -428,7 +422,7 @@ func (*EchoStatus) ProtoMessage() {} func (x *EchoStatus) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -493,11 +487,9 @@ type SimpleRequest struct { func (x *SimpleRequest) Reset() { *x = SimpleRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SimpleRequest) String() string { @@ -508,7 +500,7 @@ func (*SimpleRequest) ProtoMessage() {} func (x *SimpleRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -624,11 +616,9 @@ type SimpleResponse struct { func (x *SimpleResponse) Reset() { *x = SimpleResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_grpc_testing_messages_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SimpleResponse) String() string { @@ -639,7 +629,7 @@ func (*SimpleResponse) ProtoMessage() {} func (x *SimpleResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -713,11 +703,9 @@ type StreamingInputCallRequest struct { func (x *StreamingInputCallRequest) Reset() { *x = StreamingInputCallRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StreamingInputCallRequest) String() string { @@ -728,7 +716,7 @@ func (*StreamingInputCallRequest) ProtoMessage() {} func (x *StreamingInputCallRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -769,11 +757,9 @@ type StreamingInputCallResponse struct { func (x *StreamingInputCallResponse) Reset() { *x = StreamingInputCallResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StreamingInputCallResponse) String() string { @@ -784,7 +770,7 @@ func (*StreamingInputCallResponse) ProtoMessage() {} func (x *StreamingInputCallResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -826,11 +812,9 @@ type ResponseParameters struct { func (x *ResponseParameters) Reset() { *x = ResponseParameters{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResponseParameters) String() string { @@ -841,7 +825,7 @@ func (*ResponseParameters) ProtoMessage() {} func (x *ResponseParameters) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -900,11 +884,9 @@ type StreamingOutputCallRequest struct { func (x *StreamingOutputCallRequest) Reset() { *x = StreamingOutputCallRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *StreamingOutputCallRequest) String() string { @@ -915,7 +897,7 @@ func (*StreamingOutputCallRequest) ProtoMessage() {} func (x *StreamingOutputCallRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -977,11 +959,9 @@ type StreamingOutputCallResponse struct { func (x *StreamingOutputCallResponse) Reset() { *x = StreamingOutputCallResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StreamingOutputCallResponse) String() string { @@ -992,7 +972,7 @@ func (*StreamingOutputCallResponse) ProtoMessage() {} func (x *StreamingOutputCallResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1026,11 +1006,9 @@ type ReconnectParams struct { func (x *ReconnectParams) Reset() { *x = ReconnectParams{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReconnectParams) String() string { @@ -1041,7 +1019,7 @@ func (*ReconnectParams) ProtoMessage() {} func (x *ReconnectParams) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1077,11 +1055,9 @@ type ReconnectInfo struct { func (x *ReconnectInfo) Reset() { *x = ReconnectInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReconnectInfo) String() string { @@ -1092,7 +1068,7 @@ func (*ReconnectInfo) ProtoMessage() {} func (x *ReconnectInfo) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1138,11 +1114,9 @@ type LoadBalancerStatsRequest struct { func (x *LoadBalancerStatsRequest) Reset() { *x = LoadBalancerStatsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalancerStatsRequest) String() string { @@ -1153,7 +1127,7 @@ func (*LoadBalancerStatsRequest) ProtoMessage() {} func (x 
*LoadBalancerStatsRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1205,11 +1179,9 @@ type LoadBalancerStatsResponse struct { func (x *LoadBalancerStatsResponse) Reset() { *x = LoadBalancerStatsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalancerStatsResponse) String() string { @@ -1220,7 +1192,7 @@ func (*LoadBalancerStatsResponse) ProtoMessage() {} func (x *LoadBalancerStatsResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1272,11 +1244,9 @@ type LoadBalancerAccumulatedStatsRequest struct { func (x *LoadBalancerAccumulatedStatsRequest) Reset() { *x = LoadBalancerAccumulatedStatsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalancerAccumulatedStatsRequest) String() string { @@ -1287,7 +1257,7 @@ func (*LoadBalancerAccumulatedStatsRequest) ProtoMessage() {} func (x *LoadBalancerAccumulatedStatsRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1330,11 +1300,9 @@ type LoadBalancerAccumulatedStatsResponse struct { func (x *LoadBalancerAccumulatedStatsResponse) Reset() { *x = LoadBalancerAccumulatedStatsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalancerAccumulatedStatsResponse) String() string { @@ -1345,7 +1313,7 @@ func (*LoadBalancerAccumulatedStatsResponse) ProtoMessage() {} func (x *LoadBalancerAccumulatedStatsResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1408,11 +1376,9 @@ type ClientConfigureRequest struct { func (x *ClientConfigureRequest) Reset() { *x = ClientConfigureRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientConfigureRequest) String() string { 
@@ -1423,7 +1389,7 @@ func (*ClientConfigureRequest) ProtoMessage() {} func (x *ClientConfigureRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1468,11 +1434,9 @@ type ClientConfigureResponse struct { func (x *ClientConfigureResponse) Reset() { *x = ClientConfigureResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientConfigureResponse) String() string { @@ -1483,7 +1447,7 @@ func (*ClientConfigureResponse) ProtoMessage() {} func (x *ClientConfigureResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1508,11 +1472,9 @@ type MemorySize struct { func (x *MemorySize) Reset() { *x = MemorySize{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MemorySize) String() string { @@ -1523,7 +1485,7 @@ func (*MemorySize) ProtoMessage() {} func (x *MemorySize) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1561,11 +1523,9 @@ type TestOrcaReport struct { func (x *TestOrcaReport) Reset() { *x = TestOrcaReport{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TestOrcaReport) String() string { @@ -1576,7 +1536,7 @@ func (*TestOrcaReport) ProtoMessage() {} func (x *TestOrcaReport) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1631,11 +1591,9 @@ type SetReturnStatusRequest struct { func (x *SetReturnStatusRequest) Reset() { *x = SetReturnStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SetReturnStatusRequest) String() string { @@ -1646,7 +1604,7 @@ func (*SetReturnStatusRequest) ProtoMessage() {} func (x *SetReturnStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[20] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1689,11 +1647,9 @@ type HookRequest struct { func (x *HookRequest) Reset() { *x = HookRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HookRequest) String() string { @@ -1704,7 +1660,7 @@ func (*HookRequest) ProtoMessage() {} func (x *HookRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1755,11 +1711,9 @@ type HookResponse struct { func (x *HookResponse) Reset() { *x = HookResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HookResponse) String() string { @@ -1770,7 +1724,7 @@ func (*HookResponse) ProtoMessage() {} func (x *HookResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1801,11 +1755,9 @@ type LoadBalancerStatsResponse_MetadataEntry struct { func (x *LoadBalancerStatsResponse_MetadataEntry) Reset() { *x = LoadBalancerStatsResponse_MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalancerStatsResponse_MetadataEntry) String() string { @@ -1816,7 +1768,7 @@ func (*LoadBalancerStatsResponse_MetadataEntry) ProtoMessage() {} func (x *LoadBalancerStatsResponse_MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1864,11 +1816,9 @@ type LoadBalancerStatsResponse_RpcMetadata struct { func (x *LoadBalancerStatsResponse_RpcMetadata) Reset() { *x = LoadBalancerStatsResponse_RpcMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalancerStatsResponse_RpcMetadata) String() string { @@ -1879,7 +1829,7 @@ func (*LoadBalancerStatsResponse_RpcMetadata) ProtoMessage() {} func (x *LoadBalancerStatsResponse_RpcMetadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil 
{ + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1912,11 +1862,9 @@ type LoadBalancerStatsResponse_MetadataByPeer struct { func (x *LoadBalancerStatsResponse_MetadataByPeer) Reset() { *x = LoadBalancerStatsResponse_MetadataByPeer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalancerStatsResponse_MetadataByPeer) String() string { @@ -1927,7 +1875,7 @@ func (*LoadBalancerStatsResponse_MetadataByPeer) ProtoMessage() {} func (x *LoadBalancerStatsResponse_MetadataByPeer) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1960,11 +1908,9 @@ type LoadBalancerStatsResponse_RpcsByPeer struct { func (x *LoadBalancerStatsResponse_RpcsByPeer) Reset() { *x = LoadBalancerStatsResponse_RpcsByPeer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalancerStatsResponse_RpcsByPeer) String() string { @@ -1975,7 +1921,7 @@ func (*LoadBalancerStatsResponse_RpcsByPeer) ProtoMessage() {} func (x *LoadBalancerStatsResponse_RpcsByPeer) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2011,11 +1957,9 @@ type LoadBalancerAccumulatedStatsResponse_MethodStats struct { func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) Reset() { *x = LoadBalancerAccumulatedStatsResponse_MethodStats{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) String() string { @@ -2026,7 +1970,7 @@ func (*LoadBalancerAccumulatedStatsResponse_MethodStats) ProtoMessage() {} func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2068,11 +2012,9 @@ type ClientConfigureRequest_Metadata struct { func (x *ClientConfigureRequest_Metadata) Reset() { *x = ClientConfigureRequest_Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_messages_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x 
*ClientConfigureRequest_Metadata) String() string { @@ -2083,7 +2025,7 @@ func (*ClientConfigureRequest_Metadata) ProtoMessage() {} func (x *ClientConfigureRequest_Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_messages_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2617,356 +2559,6 @@ func file_grpc_testing_messages_proto_init() { if File_grpc_testing_messages_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_testing_messages_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*BoolValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Payload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*EchoStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*SimpleRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*SimpleResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*StreamingInputCallRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*StreamingInputCallResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ResponseParameters); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*StreamingOutputCallRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*StreamingOutputCallResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*ReconnectParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ReconnectInfo); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalancerStatsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalancerStatsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalancerAccumulatedStatsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalancerAccumulatedStatsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ClientConfigureRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*ClientConfigureResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*MemorySize); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*TestOrcaReport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*SetReturnStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*HookRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*HookResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalancerStatsResponse_MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalancerStatsResponse_RpcMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[25].Exporter = func(v any, i int) any 
{ - switch v := v.(*LoadBalancerStatsResponse_MetadataByPeer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalancerStatsResponse_RpcsByPeer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*LoadBalancerAccumulatedStatsResponse_MethodStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_messages_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*ClientConfigureRequest_Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/interop/grpc_testing/payloads.pb.go b/interop/grpc_testing/payloads.pb.go index 849bff770ca3..2380728d293e 100644 --- a/interop/grpc_testing/payloads.pb.go +++ b/interop/grpc_testing/payloads.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/testing/payloads.proto @@ -45,11 +45,9 @@ type ByteBufferParams struct { func (x *ByteBufferParams) Reset() { *x = ByteBufferParams{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_payloads_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_payloads_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ByteBufferParams) String() string { @@ -60,7 +58,7 @@ func (*ByteBufferParams) ProtoMessage() {} func (x *ByteBufferParams) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_payloads_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -100,11 +98,9 @@ type SimpleProtoParams struct { func (x *SimpleProtoParams) Reset() { *x = SimpleProtoParams{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_payloads_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_payloads_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SimpleProtoParams) String() string { @@ -115,7 +111,7 @@ func (*SimpleProtoParams) ProtoMessage() {} func (x *SimpleProtoParams) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_payloads_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -152,11 +148,9 @@ type ComplexProtoParams struct { func (x *ComplexProtoParams) Reset() { *x = ComplexProtoParams{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_payloads_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_payloads_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ComplexProtoParams) String() string { @@ -167,7 +161,7 @@ func (*ComplexProtoParams) ProtoMessage() {} func (x *ComplexProtoParams) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_payloads_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -197,11 +191,9 @@ type PayloadConfig struct { func (x *PayloadConfig) Reset() { *x = PayloadConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_payloads_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_payloads_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PayloadConfig) String() string { @@ -212,7 +204,7 @@ func (*PayloadConfig) ProtoMessage() {} func (x *PayloadConfig) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_payloads_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -349,56 +341,6 @@ func file_grpc_testing_payloads_proto_init() { if File_grpc_testing_payloads_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_testing_payloads_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ByteBufferParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_payloads_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*SimpleProtoParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_payloads_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ComplexProtoParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_payloads_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*PayloadConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_testing_payloads_proto_msgTypes[3].OneofWrappers = []any{ (*PayloadConfig_BytebufParams)(nil), (*PayloadConfig_SimpleParams)(nil), diff --git a/interop/grpc_testing/report_qps_scenario_service.pb.go b/interop/grpc_testing/report_qps_scenario_service.pb.go index dd2ffbee88a4..8e74c093f284 100644 --- a/interop/grpc_testing/report_qps_scenario_service.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/testing/report_qps_scenario_service.proto diff --git a/interop/grpc_testing/stats.pb.go b/interop/grpc_testing/stats.pb.go index 25110f6209df..b0aed877755a 100644 --- a/interop/grpc_testing/stats.pb.go +++ b/interop/grpc_testing/stats.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/testing/stats.proto @@ -59,11 +59,9 @@ type ServerStats struct { func (x *ServerStats) Reset() { *x = ServerStats{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_stats_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_stats_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerStats) String() string { @@ -74,7 +72,7 @@ func (*ServerStats) ProtoMessage() {} func (x *ServerStats) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_stats_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -150,11 +148,9 @@ type HistogramParams struct { func (x *HistogramParams) Reset() { *x = HistogramParams{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_stats_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_stats_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistogramParams) String() string { @@ -165,7 +161,7 @@ func (*HistogramParams) ProtoMessage() {} func (x *HistogramParams) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_stats_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -210,11 +206,9 @@ type HistogramData struct { func (x *HistogramData) Reset() { *x = HistogramData{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_stats_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_stats_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistogramData) String() string { @@ -225,7 +219,7 @@ func (*HistogramData) ProtoMessage() {} func (x *HistogramData) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_stats_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -293,11 +287,9 @@ type RequestResultCount struct { func (x *RequestResultCount) Reset() { *x = RequestResultCount{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_stats_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_stats_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RequestResultCount) String() string { @@ -308,7 +300,7 @@ func (*RequestResultCount) ProtoMessage() {} func (x *RequestResultCount) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_stats_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -358,11 +350,9 @@ type ClientStats struct { func (x *ClientStats) Reset() { *x = ClientStats{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_stats_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
- ms.StoreMessageInfo(mi) - } + mi := &file_grpc_testing_stats_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientStats) String() string { @@ -373,7 +363,7 @@ func (*ClientStats) ProtoMessage() {} func (x *ClientStats) ProtoReflect() protoreflect.Message { mi := &file_grpc_testing_stats_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -546,68 +536,6 @@ func file_grpc_testing_stats_proto_init() { if File_grpc_testing_stats_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_testing_stats_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ServerStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_stats_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*HistogramParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_stats_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*HistogramData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_stats_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*RequestResultCount); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_testing_stats_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ClientStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/interop/grpc_testing/test.pb.go b/interop/grpc_testing/test.pb.go index ee1845d15181..148a6beb54f0 100644 --- a/interop/grpc_testing/test.pb.go +++ b/interop/grpc_testing/test.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/testing/test.proto diff --git a/interop/grpc_testing/worker_service.pb.go b/interop/grpc_testing/worker_service.pb.go index 8e7b5509ae67..d074cb5e3e6d 100644 --- a/interop/grpc_testing/worker_service.pb.go +++ b/interop/grpc_testing/worker_service.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/testing/worker_service.proto diff --git a/interop/observability/go.mod b/interop/observability/go.mod index b14505dfa983..eb06c93c99d0 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -3,33 +3,33 @@ module google.golang.org/grpc/interop/observability go 1.22.7 require ( - google.golang.org/grpc v1.66.2 + google.golang.org/grpc v1.67.1 google.golang.org/grpc/gcp/observability v1.0.1 ) require ( - cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/auth v0.9.4 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.9.8 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/logging v1.11.0 // indirect - cloud.google.com/go/longrunning v0.6.0 // indirect - cloud.google.com/go/monitoring v1.21.0 // indirect - cloud.google.com/go/trace v1.11.0 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/logging v1.12.0 // indirect + cloud.google.com/go/longrunning v0.6.1 // indirect + cloud.google.com/go/monitoring v1.21.1 // indirect + cloud.google.com/go/trace v1.11.1 // indirect contrib.go.opencensus.io/exporter/stackdriver v0.13.15-0.20230702191903-2de6d2748484 // indirect - github.com/aws/aws-sdk-go-v2 v1.30.5 // indirect - github.com/aws/aws-sdk-go-v2/config v1.27.33 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.32 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.28.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.41 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect - github.com/aws/smithy-go v1.20.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect + github.com/aws/smithy-go v1.22.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect @@ -43,24 +43,24 @@ require ( github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 // indirect - 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/otel v1.30.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect - go.opentelemetry.io/otel/trace v1.30.0 // indirect - golang.org/x/crypto v0.27.0 // indirect - golang.org/x/net v0.29.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - golang.org/x/time v0.6.0 // indirect - google.golang.org/api v0.197.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + google.golang.org/api v0.201.0 // indirect + google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect google.golang.org/grpc/stats/opencensus v1.0.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/protobuf v1.35.1 // indirect ) replace google.golang.org/grpc => ../.. 
diff --git a/interop/observability/go.sum b/interop/observability/go.sum index 5efcd511f56f..fc68e26e9399 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -1,5 +1,5 @@ cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -40,8 +40,8 @@ cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMz cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= @@ -118,8 +118,8 @@ cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEar cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= -cloud.google.com/go/auth v0.9.4 h1:DxF7imbEbiFu9+zdKC6cKBko1e8XeJnipNqIbWZ+kDI= -cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= +cloud.google.com/go/auth v0.9.8 h1:+CSJ0Gw9iVeSENVCKJoLHhdUykDgXSc4Qn+gu2BRtR8= +cloud.google.com/go/auth v0.9.8/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= @@ -219,8 +219,8 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= 
cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -401,8 +401,8 @@ cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCta cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= -cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= @@ -440,16 +440,16 @@ cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= -cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= +cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= +cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= -cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= -cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= @@ -481,8 +481,8 @@ cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuu cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= -cloud.google.com/go/monitoring v1.21.0 
h1:EMc0tB+d3lUewT2NzKC/hr8cSR9WsUieVywzIHetGro= -cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= @@ -707,8 +707,8 @@ cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1r cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= -cloud.google.com/go/trace v1.11.0 h1:UHX6cOJm45Zw/KIbqHe4kII8PupLt/V5tscZUkeiJVI= -cloud.google.com/go/trace v1.11.0/go.mod h1:Aiemdi52635dBR7o3zuc9lLjXo3BwGaChEjCa3tJNmM= +cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= +cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= @@ -786,32 +786,32 @@ github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0I github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= -github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= -github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU= -github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I= -github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= +github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= +github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= +github.com/aws/aws-sdk-go-v2/config 
v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o= -github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= -github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/smithy-go v1.22.0 
h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= +github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bazelbuild/rules_go v0.49.0/go.mod h1:Dhcz716Kqg1RHNWos+N6MlXNkjNP2EwZQ0LukRKJfMs= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= @@ -846,7 +846,7 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -1095,18 +1095,18 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 h1:hCq2hNMwsegUvPzI7sPOvtO9cqyy5GbWt/Ybp2xrx8Q= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= 
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1134,8 +1134,9 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1263,8 +1264,9 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1403,9 +1405,11 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 
h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1424,7 +1428,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1445,16 +1450,17 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1600,8 +1606,8 @@ google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZ google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= -google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= +google.golang.org/api v0.201.0 h1:+7AD9JNM3tREtawRMu8sOjSbb8VYcYXJG/2eEOmfDu0= +google.golang.org/api v0.201.0/go.mod h1:HVY0FCHVs89xIW9fzf/pBvOEm+OolHa86G/txFezyq4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1747,8 +1753,8 @@ google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mR google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 h1:Df6WuGvthPzc+JiQ/G+m+sNX24kc0aTBqoDN/0yyykE= +google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53/go.mod h1:fheguH3Am2dGp1LfXkrvwqC/KlFq8F0nLq3LryOMrrE= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -1759,8 +1765,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -1776,9 +1782,9 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go. google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1802,8 +1808,9 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 
h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/interop/stress/grpc_testing/metrics.pb.go b/interop/stress/grpc_testing/metrics.pb.go index d5b8a87053db..98298d0ead68 100644 --- a/interop/stress/grpc_testing/metrics.pb.go +++ b/interop/stress/grpc_testing/metrics.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: interop/stress/grpc_testing/metrics.proto @@ -58,11 +58,9 @@ type GaugeResponse struct { func (x *GaugeResponse) Reset() { *x = GaugeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_interop_stress_grpc_testing_metrics_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_interop_stress_grpc_testing_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GaugeResponse) String() string { @@ -73,7 +71,7 @@ func (*GaugeResponse) ProtoMessage() {} func (x *GaugeResponse) ProtoReflect() protoreflect.Message { mi := &file_interop_stress_grpc_testing_metrics_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -156,11 +154,9 @@ type GaugeRequest struct { func (x *GaugeRequest) Reset() { *x = GaugeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_interop_stress_grpc_testing_metrics_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_interop_stress_grpc_testing_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GaugeRequest) String() string { @@ -171,7 +167,7 @@ func (*GaugeRequest) ProtoMessage() {} func (x *GaugeRequest) ProtoReflect() protoreflect.Message { mi := &file_interop_stress_grpc_testing_metrics_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -201,11 +197,9 @@ type EmptyMessage struct { func (x *EmptyMessage) Reset() { *x = EmptyMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_interop_stress_grpc_testing_metrics_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_interop_stress_grpc_testing_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EmptyMessage) String() string { @@ -216,7 +210,7 @@ func (*EmptyMessage) ProtoMessage() {} func (x *EmptyMessage) ProtoReflect() protoreflect.Message { mi := &file_interop_stress_grpc_testing_metrics_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -302,44 +296,6 @@ func file_interop_stress_grpc_testing_metrics_proto_init() { if File_interop_stress_grpc_testing_metrics_proto != 
nil { return } - if !protoimpl.UnsafeEnabled { - file_interop_stress_grpc_testing_metrics_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GaugeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_interop_stress_grpc_testing_metrics_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*GaugeRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_interop_stress_grpc_testing_metrics_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*EmptyMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_interop_stress_grpc_testing_metrics_proto_msgTypes[0].OneofWrappers = []any{ (*GaugeResponse_LongValue)(nil), (*GaugeResponse_DoubleValue)(nil), diff --git a/interop/xds/go.mod b/interop/xds/go.mod index 3eb581c8833b..d4052ef96dd4 100644 --- a/interop/xds/go.mod +++ b/interop/xds/go.mod @@ -7,43 +7,43 @@ replace google.golang.org/grpc => ../.. replace google.golang.org/grpc/stats/opentelemetry => ../../stats/opentelemetry require ( - github.com/prometheus/client_golang v1.20.3 - go.opentelemetry.io/otel/exporters/prometheus v0.52.0 - go.opentelemetry.io/otel/sdk/metric v1.30.0 - google.golang.org/grpc v1.66.2 - google.golang.org/grpc/stats/opentelemetry v0.0.0-20240912061038-b6fde8cdd1c0 + github.com/prometheus/client_golang v1.20.5 + go.opentelemetry.io/otel/exporters/prometheus v0.53.0 + go.opentelemetry.io/otel/sdk/metric v1.31.0 + google.golang.org/grpc v1.67.1 + google.golang.org/grpc/stats/opentelemetry v0.0.0-20241017035653-830135e6c5a3 ) require ( - cel.dev/expr v0.16.1 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect + cel.dev/expr v0.16.2 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect - github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.59.1 // indirect + github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - go.opentelemetry.io/contrib/detectors/gcp v1.30.0 // indirect - go.opentelemetry.io/otel v1.30.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect - go.opentelemetry.io/otel/sdk v1.30.0 // indirect - go.opentelemetry.io/otel/trace v1.30.0 // indirect - golang.org/x/net v0.29.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp 
v1.31.0 // indirect + go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/protobuf v1.35.1 // indirect ) diff --git a/interop/xds/go.sum b/interop/xds/go.sum index 46204867e2f8..ef695b391a3d 100644 --- a/interop/xds/go.sum +++ b/interop/xds/go.sum @@ -1,9 +1,9 @@ -cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= -cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +cel.dev/expr v0.16.2 h1:RwRhoH17VhAu9U5CMvMhH1PDVgf0tuz9FT+24AfMLfU= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 h1:cb3br57K508pQEFgBxn9GDhPS9HefpyMPK1RzmtMNzk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= @@ -14,8 +14,8 @@ github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8E github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod 
h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -27,8 +27,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -37,45 +37,45 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgm github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= -github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.opentelemetry.io/contrib/detectors/gcp v1.30.0 h1:GF+YVnUeJwOy+Ag2cTEpVZq+r2Tnci42FIiNwA2gjME= -go.opentelemetry.io/contrib/detectors/gcp v1.30.0/go.mod h1:p5Av42vWKPezk67MQwLYZwlo/z6xLnN/upaIyQNWBGg= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ= -go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod 
h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= -go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= -go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= -go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= -go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/exporters/prometheus v0.53.0 h1:QXobPHrwiGLM4ufrY3EOmDPJpo2P90UuFau4CDPJA/I= +go.opentelemetry.io/otel/exporters/prometheus v0.53.0/go.mod h1:WOAXGr3D00CfzmFxtTV1eR0GpoHuPEu+HJT8UWW2SIU= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod 
h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/profiling/proto/service.pb.go b/profiling/proto/service.pb.go index 436401153cca..e63964fb8d9c 100644 --- a/profiling/proto/service.pb.go +++ b/profiling/proto/service.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: profiling/proto/service.proto @@ -48,11 +48,9 @@ type EnableRequest struct { func (x *EnableRequest) Reset() { *x = EnableRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_profiling_proto_service_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_profiling_proto_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnableRequest) String() string { @@ -63,7 +61,7 @@ func (*EnableRequest) ProtoMessage() {} func (x *EnableRequest) ProtoReflect() protoreflect.Message { mi := &file_profiling_proto_service_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -94,11 +92,9 @@ type EnableResponse struct { func (x *EnableResponse) Reset() { *x = EnableResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_profiling_proto_service_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_profiling_proto_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnableResponse) String() string { @@ -109,7 +105,7 @@ func (*EnableResponse) ProtoMessage() {} func (x *EnableResponse) ProtoReflect() protoreflect.Message { mi := &file_profiling_proto_service_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -134,11 +130,9 @@ type GetStreamStatsRequest struct { func (x *GetStreamStatsRequest) Reset() { *x = GetStreamStatsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_profiling_proto_service_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_profiling_proto_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetStreamStatsRequest) String() string { @@ -149,7 +143,7 @@ func (*GetStreamStatsRequest) ProtoMessage() {} func (x *GetStreamStatsRequest) ProtoReflect() protoreflect.Message { mi := &file_profiling_proto_service_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -176,11 +170,9 @@ type GetStreamStatsResponse struct { func (x *GetStreamStatsResponse) Reset() { *x = GetStreamStatsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_profiling_proto_service_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_profiling_proto_service_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetStreamStatsResponse) String() string { @@ -191,7 +183,7 @@ func (*GetStreamStatsResponse) ProtoMessage() {} func (x *GetStreamStatsResponse) ProtoReflect() protoreflect.Message { mi := &file_profiling_proto_service_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -239,11 +231,9 @@ type Timer struct { func (x *Timer) Reset() { *x = Timer{} - if protoimpl.UnsafeEnabled { - mi := &file_profiling_proto_service_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_profiling_proto_service_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timer) String() string { @@ -254,7 +244,7 @@ func (*Timer) ProtoMessage() {} func (x *Timer) ProtoReflect() protoreflect.Message { mi := &file_profiling_proto_service_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -332,11 +322,9 @@ type Stat struct { func (x *Stat) Reset() { *x = Stat{} - if protoimpl.UnsafeEnabled { - mi := &file_profiling_proto_service_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_profiling_proto_service_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Stat) String() string { @@ -347,7 +335,7 @@ func (*Stat) ProtoMessage() {} func (x *Stat) ProtoReflect() protoreflect.Message { mi := &file_profiling_proto_service_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -478,80 +466,6 @@ func file_profiling_proto_service_proto_init() { if File_profiling_proto_service_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_profiling_proto_service_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*EnableRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_profiling_proto_service_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*EnableResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - 
default: - return nil - } - } - file_profiling_proto_service_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*GetStreamStatsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_profiling_proto_service_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*GetStreamStatsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_profiling_proto_service_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Timer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_profiling_proto_service_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Stat); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/reflection/grpc_reflection_v1/reflection.pb.go b/reflection/grpc_reflection_v1/reflection.pb.go index e1f58104d856..58019722d01b 100644 --- a/reflection/grpc_reflection_v1/reflection.pb.go +++ b/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto @@ -64,11 +64,9 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionRequest) String() string { @@ -79,7 +77,7 @@ func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -208,11 +206,9 @@ type ExtensionRequest struct { func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRequest) String() string { @@ -223,7 +219,7 @@ func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -274,11 +270,9 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionResponse) String() string { @@ -289,7 +283,7 @@ func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -408,11 +402,9 @@ type FileDescriptorResponse struct { func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorResponse) String() string { @@ -423,7 +415,7 @@ func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -460,11 +452,9 @@ type ExtensionNumberResponse struct { func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionNumberResponse) String() string { @@ -475,7 +465,7 @@ func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -517,11 +507,9 @@ type ListServiceResponse struct { func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListServiceResponse) String() string { @@ -532,7 +520,7 @@ func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -568,11 +556,9 @@ type ServiceResponse struct { func (x *ServiceResponse) Reset() { *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceResponse) String() string { @@ -583,7 +569,7 @@ func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -618,11 +604,9 @@ type ErrorResponse struct { func (x *ErrorResponse) Reset() { *x = ErrorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorResponse) String() string { @@ -633,7 +617,7 @@ func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -821,104 +805,6 @@ func file_grpc_reflection_v1_reflection_proto_init() { if File_grpc_reflection_v1_reflection_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ErrorResponse); i { - 
case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), diff --git a/reflection/grpc_reflection_v1alpha/reflection.pb.go b/reflection/grpc_reflection_v1alpha/reflection.pb.go index 0582e16af2b0..4d7b654ef544 100644 --- a/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. @@ -64,11 +64,9 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionRequest) String() string { @@ -79,7 +77,7 @@ func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -229,11 +227,9 @@ type ExtensionRequest struct { func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRequest) String() string { @@ -244,7 +240,7 @@ func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -301,11 +297,9 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionResponse) String() string { @@ -316,7 +310,7 @@ func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -453,11 +447,9 @@ type FileDescriptorResponse struct { func (x *FileDescriptorResponse) 
Reset() { *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorResponse) String() string { @@ -468,7 +460,7 @@ func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -511,11 +503,9 @@ type ExtensionNumberResponse struct { func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionNumberResponse) String() string { @@ -526,7 +516,7 @@ func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -574,11 +564,9 @@ type ListServiceResponse struct { func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListServiceResponse) String() string { @@ -589,7 +577,7 @@ func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -630,11 +618,9 @@ type ServiceResponse struct { func (x *ServiceResponse) Reset() { *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceResponse) String() string { @@ -645,7 +631,7 @@ func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -686,11 +672,9 @@ type ErrorResponse struct { func (x *ErrorResponse) Reset() { *x = ErrorResponse{} - if 
protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorResponse) String() string { @@ -701,7 +685,7 @@ func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -896,104 +880,6 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { if File_grpc_reflection_v1alpha_reflection_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ErrorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), diff --git a/reflection/grpc_testing/proto2.pb.go b/reflection/grpc_testing/proto2.pb.go index 079a437b610e..3b12b738d1a9 100644 --- a/reflection/grpc_testing/proto2.pb.go +++ b/reflection/grpc_testing/proto2.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: reflection/grpc_testing/proto2.proto @@ -45,11 +45,9 @@ type ToBeExtended struct { func (x *ToBeExtended) Reset() { *x = ToBeExtended{} - if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_testing_proto2_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_reflection_grpc_testing_proto2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ToBeExtended) String() string { @@ -60,7 +58,7 @@ func (*ToBeExtended) ProtoMessage() {} func (x *ToBeExtended) ProtoReflect() protoreflect.Message { mi := &file_reflection_grpc_testing_proto2_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -125,22 +123,6 @@ func file_reflection_grpc_testing_proto2_proto_init() { if File_reflection_grpc_testing_proto2_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_reflection_grpc_testing_proto2_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ToBeExtended); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/reflection/grpc_testing/proto2_ext.pb.go b/reflection/grpc_testing/proto2_ext.pb.go index 605ad6aa605b..d72a25d03e11 100644 --- a/reflection/grpc_testing/proto2_ext.pb.go +++ b/reflection/grpc_testing/proto2_ext.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: reflection/grpc_testing/proto2_ext.proto @@ -44,11 +44,9 @@ type Extension struct { func (x *Extension) Reset() { *x = Extension{} - if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_testing_proto2_ext_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_reflection_grpc_testing_proto2_ext_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Extension) String() string { @@ -59,7 +57,7 @@ func (*Extension) ProtoMessage() {} func (x *Extension) ProtoReflect() protoreflect.Message { mi := &file_reflection_grpc_testing_proto2_ext_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -187,20 +185,6 @@ func file_reflection_grpc_testing_proto2_ext_proto_init() { } file_reflection_grpc_testing_proto2_proto_init() file_reflection_grpc_testing_test_proto_init() - if !protoimpl.UnsafeEnabled { - file_reflection_grpc_testing_proto2_ext_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Extension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/reflection/grpc_testing/proto2_ext2.pb.go b/reflection/grpc_testing/proto2_ext2.pb.go index f895a565f180..2be507e1cb37 100644 --- a/reflection/grpc_testing/proto2_ext2.pb.go +++ b/reflection/grpc_testing/proto2_ext2.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: reflection/grpc_testing/proto2_ext2.proto @@ -44,11 +44,9 @@ type AnotherExtension struct { func (x *AnotherExtension) Reset() { *x = AnotherExtension{} - if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_testing_proto2_ext2_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_reflection_grpc_testing_proto2_ext2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AnotherExtension) String() string { @@ -59,7 +57,7 @@ func (*AnotherExtension) ProtoMessage() {} func (x *AnotherExtension) ProtoReflect() protoreflect.Message { mi := &file_reflection_grpc_testing_proto2_ext2_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -168,20 +166,6 @@ func file_reflection_grpc_testing_proto2_ext2_proto_init() { return } file_reflection_grpc_testing_proto2_proto_init() - if !protoimpl.UnsafeEnabled { - file_reflection_grpc_testing_proto2_ext2_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*AnotherExtension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/reflection/grpc_testing/test.pb.go b/reflection/grpc_testing/test.pb.go index bd3a783e62e6..1e411dc3e519 100644 --- a/reflection/grpc_testing/test.pb.go +++ b/reflection/grpc_testing/test.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: reflection/grpc_testing/test.proto @@ -44,11 +44,9 @@ type SearchResponse struct { func (x *SearchResponse) Reset() { *x = SearchResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_testing_test_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_reflection_grpc_testing_test_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SearchResponse) String() string { @@ -59,7 +57,7 @@ func (*SearchResponse) ProtoMessage() {} func (x *SearchResponse) ProtoReflect() protoreflect.Message { mi := &file_reflection_grpc_testing_test_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -91,11 +89,9 @@ type SearchRequest struct { func (x *SearchRequest) Reset() { *x = SearchRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_testing_test_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_reflection_grpc_testing_test_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SearchRequest) String() string { @@ -106,7 +102,7 @@ func (*SearchRequest) ProtoMessage() {} func (x *SearchRequest) ProtoReflect() protoreflect.Message { mi := &file_reflection_grpc_testing_test_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -140,11 +136,9 @@ type SearchResponse_Result struct { func (x *SearchResponse_Result) Reset() { *x = SearchResponse_Result{} - if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_testing_test_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_reflection_grpc_testing_test_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SearchResponse_Result) String() string { @@ -155,7 +149,7 @@ func (*SearchResponse_Result) ProtoMessage() {} func (x *SearchResponse_Result) ProtoReflect() protoreflect.Message { mi := &file_reflection_grpc_testing_test_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -262,44 +256,6 @@ func file_reflection_grpc_testing_test_proto_init() { if File_reflection_grpc_testing_test_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_reflection_grpc_testing_test_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*SearchResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_reflection_grpc_testing_test_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*SearchRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_reflection_grpc_testing_test_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*SearchResponse_Result); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/reflection/test/go.mod b/reflection/test/go.mod index 4f3ea6193c10..7382e0e9842e 100644 --- a/reflection/test/go.mod +++ b/reflection/test/go.mod @@ -6,13 +6,13 @@ replace google.golang.org/grpc => ../../ require ( github.com/golang/protobuf v1.5.4 - google.golang.org/grpc v1.66.2 - google.golang.org/protobuf v1.34.2 + google.golang.org/grpc v1.67.1 + google.golang.org/protobuf v1.35.1 ) require ( - golang.org/x/net v0.29.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect ) diff --git a/reflection/test/go.sum b/reflection/test/go.sum index 1bb8cff78bcb..c1616371c89e 100644 --- a/reflection/test/go.sum +++ b/reflection/test/go.sum @@ -2,13 +2,13 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/scripts/revive.toml b/scripts/revive.toml index 30238dd8e66f..34d3816ea55d 100644 --- a/scripts/revive.toml +++ b/scripts/revive.toml @@ -13,7 +13,6 @@ [rule.package-comments] [rule.range] [rule.receiver-naming] -[rule.redefines-builtin-id] 
[rule.superfluous-else] [rule.time-naming] [rule.var-naming] @@ -28,4 +27,7 @@ Disabled = true [rule.import-shadowing] # Disabled to allow intentional reuse of variable names that are the same as package imports. Disabled = true +[rule.redefines-builtin-id] # Disabled to allow intentional reuse of variable names that are the same as built-in functions. + Disabled = true + diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index 5139e734efa9..fed6883ddb16 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -3,18 +3,18 @@ module google.golang.org/grpc/security/advancedtls/examples go 1.22.7 require ( - google.golang.org/grpc v1.66.2 - google.golang.org/grpc/examples v0.0.0-20240912061038-b6fde8cdd1c0 + google.golang.org/grpc v1.67.1 + google.golang.org/grpc/examples v0.0.0-20241017035653-830135e6c5a3 google.golang.org/grpc/security/advancedtls v1.0.0 ) require ( - golang.org/x/crypto v0.27.0 // indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/protobuf v1.35.1 // indirect ) replace google.golang.org/grpc => ../../.. diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 4030fcdc1c60..d43932726ef0 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -1,14 +1,14 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text 
v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index fbbcf0b357e6..8f678372dc85 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -4,17 +4,17 @@ go 1.22.7 require ( github.com/google/go-cmp v0.6.0 - golang.org/x/crypto v0.27.0 - google.golang.org/grpc v1.66.2 + golang.org/x/crypto v0.28.0 + google.golang.org/grpc v1.67.1 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b ) require ( - golang.org/x/net v0.29.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/protobuf v1.35.1 // indirect ) replace google.golang.org/grpc => ../../ diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 4030fcdc1c60..d43932726ef0 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,14 +1,14 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.19.0 
h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index 370d83f62e46..c2bae551a9c7 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -5,16 +5,16 @@ go 1.22.7 require ( github.com/google/go-cmp v0.6.0 go.opencensus.io v0.24.0 - google.golang.org/grpc v1.66.2 + google.golang.org/grpc v1.67.1 ) require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/protobuf v1.35.1 // indirect ) replace google.golang.org/grpc => ../.. diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index f97ffd1b496e..d382006665f0 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -1,5 +1,5 @@ cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -213,7 +213,7 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -798,7 +798,7 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= 
-github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -1055,7 +1055,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1183,8 +1184,9 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1321,9 +1323,11 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1342,7 +1346,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1363,8 +1368,9 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1671,7 +1677,7 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go. google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -1687,9 +1693,9 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go. 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1713,8 +1719,9 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/stats/opentelemetry/go.mod b/stats/opentelemetry/go.mod index 2f029dc2d282..61e31b4ce343 100644 --- a/stats/opentelemetry/go.mod +++ b/stats/opentelemetry/go.mod @@ -5,21 +5,21 @@ go 1.22.7 replace google.golang.org/grpc => ../.. 
require ( - github.com/envoyproxy/go-control-plane v0.13.0 + github.com/envoyproxy/go-control-plane v0.13.1 github.com/google/go-cmp v0.6.0 - go.opentelemetry.io/contrib/detectors/gcp v1.30.0 - go.opentelemetry.io/otel v1.30.0 - go.opentelemetry.io/otel/metric v1.30.0 - go.opentelemetry.io/otel/sdk v1.30.0 - go.opentelemetry.io/otel/sdk/metric v1.30.0 - google.golang.org/grpc v1.66.2 - google.golang.org/protobuf v1.34.2 + go.opentelemetry.io/contrib/detectors/gcp v1.31.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/metric v1.31.0 + go.opentelemetry.io/otel/sdk v1.31.0 + go.opentelemetry.io/otel/sdk/metric v1.31.0 + google.golang.org/grpc v1.67.1 + google.golang.org/protobuf v1.35.1 ) require ( - cel.dev/expr v0.16.1 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 // indirect + cel.dev/expr v0.16.2 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect @@ -28,12 +28,12 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - go.opentelemetry.io/otel/trace v1.30.0 // indirect - golang.org/x/net v0.29.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect ) diff --git a/stats/opentelemetry/go.sum b/stats/opentelemetry/go.sum index f2a2a19b1e07..14411e4f49c3 100644 --- a/stats/opentelemetry/go.sum +++ b/stats/opentelemetry/go.sum @@ -1,9 +1,9 @@ -cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= -cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1 h1:pB2F2JKCj1Znmp2rwxxt1J0Fg0wezTMgWYk5Mpbi1kg= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +cel.dev/expr v0.16.2 h1:RwRhoH17VhAu9U5CMvMhH1PDVgf0tuz9FT+24AfMLfU= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 h1:cb3br57K508pQEFgBxn9GDhPS9HefpyMPK1RzmtMNzk= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -12,8 +12,8 @@ github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8E github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -31,35 +31,35 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.opentelemetry.io/contrib/detectors/gcp v1.30.0 h1:GF+YVnUeJwOy+Ag2cTEpVZq+r2Tnci42FIiNwA2gjME= -go.opentelemetry.io/contrib/detectors/gcp v1.30.0/go.mod h1:p5Av42vWKPezk67MQwLYZwlo/z6xLnN/upaIyQNWBGg= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= -go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= -go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= -go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= -go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= 
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/test/codec_perf/perf.pb.go 
b/test/codec_perf/perf.pb.go index 20d8f3e16147..dbe6cb5700ca 100644 --- a/test/codec_perf/perf.pb.go +++ b/test/codec_perf/perf.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: test/codec_perf/perf.proto @@ -49,11 +49,9 @@ type Buffer struct { func (x *Buffer) Reset() { *x = Buffer{} - if protoimpl.UnsafeEnabled { - mi := &file_test_codec_perf_perf_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_test_codec_perf_perf_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Buffer) String() string { @@ -64,7 +62,7 @@ func (*Buffer) ProtoMessage() {} func (x *Buffer) ProtoReflect() protoreflect.Message { mi := &file_test_codec_perf_perf_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -128,20 +126,6 @@ func file_test_codec_perf_perf_proto_init() { if File_test_codec_perf_perf_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_test_codec_perf_perf_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Buffer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/test/tools/go.mod b/test/tools/go.mod index b8ba4219637c..49c34127fa70 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -4,9 +4,9 @@ go 1.22.7 require ( github.com/client9/misspell v0.3.4 - github.com/mgechev/revive v1.3.9 - golang.org/x/tools v0.25.0 - google.golang.org/protobuf v1.34.2 + github.com/mgechev/revive v1.4.0 + golang.org/x/tools v0.26.0 + google.golang.org/protobuf v1.35.1 honnef.co/go/tools v0.5.1 ) @@ -22,12 +22,11 @@ require ( github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/spf13/afero v1.11.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/exp/typeparams v0.0.0-20241009180824-f66d83c29e7c // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/text v0.18.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect ) diff --git a/test/tools/go.sum b/test/tools/go.sum index 193ec0ce229f..9e262e028bd8 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -25,14 +25,12 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0= github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.3.9 h1:18Y3R4a2USSBF+QZKFQwVkBROUda7uoBlkEuBD+YD1A= -github.com/mgechev/revive v1.3.9/go.mod h1:+uxEIr5UH0TjXWHTno3xh4u7eg6jDpXKzQccA9UGhHU= +github.com/mgechev/revive v1.4.0 h1:+6LDNE1XKsUCkpuDOMrzjOsXqiQOZ/jPlscLyA6mMXw= +github.com/mgechev/revive 
v1.4.0/go.mod h1:uzGR6feiCiJi4oND58/KMt/lEnR5vmjzRYPZiR0sQRQ= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -48,22 +46,22 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0 h1:bVwtbF629Xlyxk6xLQq2TDYmqP0uiWaet5LwRebuY0k= -golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20241009180824-f66d83c29e7c h1:F/15/6p7LyGUSoP0GE5CB/U9+TNEER1foNOP5sWLLnI= +golang.org/x/exp/typeparams v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= -golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 14e2a206ca0a3509dc0f03c5ae6fa6e169caf964 Mon Sep 17 00:00:00 2001 From: apolcyn Date: Mon, 21 Oct 2024 11:31:44 -0700 Subject: [PATCH 18/57] resolver/google-c2p: introduce SetUniverseDomain API (#7719) --- xds/googledirectpath/googlec2p.go | 62 +++++++- xds/googledirectpath/googlec2p_test.go | 204 +++++++++++++++++++++++++ 2 files changed, 258 insertions(+), 8 deletions(-) diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 936bf2da3274..fab8097e41b7 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -30,6 +30,7 @@ import ( "fmt" "math/rand" "net/url" + "sync" "time" "google.golang.org/grpc/grpclog" @@ -46,7 +47,7 @@ const ( c2pScheme = "google-c2p" c2pAuthority = "traffic-director-c2p.xds.googleapis.com" - tdURL = "dns:///directpath-pa.googleapis.com" + defaultUniverseDomain = "googleapis.com" zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" @@ -56,17 +57,66 @@ const ( dnsName, xdsName = "dns", "xds" ) -// For overriding in unittests. var ( + logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) + universeDomainMu sync.Mutex + universeDomain = "" + // For overriding in unittests. onGCE = googlecloud.OnGCE randInt = rand.Int - logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) ) func init() { resolver.Register(c2pResolverBuilder{}) } +// SetUniverseDomain informs the gRPC library of the universe domain +// in which the process is running (for example, "googleapis.com"). +// It is the caller's responsibility to ensure that the domain is correct. +// +// This setting is used by the "google-c2p" resolver (the resolver used +// for URIs with the "google-c2p" scheme) to configure its dependencies. +// +// If a gRPC channel is created with the "google-c2p" URI scheme and this +// function has NOT been called, then gRPC configures the universe domain as +// "googleapis.com". +// +// Returns nil if either: +// +// a) The universe domain has not yet been configured. +// b) The universe domain has been configured and matches the provided value. +// +// Otherwise, returns an error. +func SetUniverseDomain(domain string) error { + universeDomainMu.Lock() + defer universeDomainMu.Unlock() + if domain == "" { + return fmt.Errorf("universe domain cannot be empty") + } + if universeDomain == "" { + universeDomain = domain + return nil + } + if universeDomain != domain { + return fmt.Errorf("universe domain cannot be set to %s, already set to different value: %s", domain, universeDomain) + } + return nil +} + +func getXdsServerURI() string { + universeDomainMu.Lock() + defer universeDomainMu.Unlock() + if universeDomain == "" { + universeDomain = defaultUniverseDomain + } + // Put env var override logic after default value logic so + // that tests still run the default value logic. 
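	// Illustrative sketch (hedged; the universe domain, target name, and
	// creds variable below are placeholders, not taken from this patch): a
	// caller running outside the default universe would invoke
	// SetUniverseDomain once, before creating any channel that uses the
	// "google-c2p" scheme:
	//
	//	import "google.golang.org/grpc/xds/googledirectpath"
	//
	//	if err := googledirectpath.SetUniverseDomain("example-universe.test"); err != nil {
	//		log.Fatalf("SetUniverseDomain: %v", err)
	//	}
	//	cc, err := grpc.NewClient("google-c2p:///my-service",
	//		grpc.WithTransportCredentials(creds))
	//
	// With that setting, and absent the test-only override below, this
	// function returns "dns:///directpath-pa.example-universe.test".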
+ if envconfig.C2PResolverTestOnlyTrafficDirectorURI != "" { + return envconfig.C2PResolverTestOnlyTrafficDirectorURI + } + return fmt.Sprintf("dns:///directpath-pa.%s", universeDomain) +} + type c2pResolverBuilder struct{} func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { @@ -90,11 +140,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts go func() { zoneCh <- getZone(httpReqTimeout) }() go func() { ipv6CapableCh <- getIPv6Capable(httpReqTimeout) }() - xdsServerURI := envconfig.C2PResolverTestOnlyTrafficDirectorURI - if xdsServerURI == "" { - xdsServerURI = tdURL - } - + xdsServerURI := getXdsServerURI() nodeCfg := newNodeConfig(<-zoneCh, <-ipv6CapableCh) xdsServerCfg := newXdsServerConfig(xdsServerURI) authoritiesCfg := newAuthoritiesConfig(xdsServerCfg) diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index afa4ea0c7f55..4b101b308014 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -82,6 +82,21 @@ func simulateRunningOnGCE(t *testing.T, gce bool) { t.Cleanup(func() { onGCE = oldOnGCE }) } +// ensure universeDomain is set to the expected default, +// and clean it up again after the test. +func useCleanUniverseDomain(t *testing.T) { + universeDomainMu.Lock() + defer universeDomainMu.Unlock() + if universeDomain != "" { + t.Fatalf("universe domain unexpectedly initialized: %v", universeDomain) + } + t.Cleanup(func() { + universeDomainMu.Lock() + universeDomain = "" + universeDomainMu.Unlock() + }) +} + // Tests the scenario where the bootstrap env vars are set and we're running on // GCE. The test builds a google-c2p resolver and verifies that an xDS resolver // is built and that we don't fallback to DNS (because federation is enabled by @@ -89,6 +104,7 @@ func simulateRunningOnGCE(t *testing.T, gce bool) { func (s) TestBuildWithBootstrapEnvSet(t *testing.T) { replaceResolvers(t) simulateRunningOnGCE(t, true) + useCleanUniverseDomain(t) builder := resolver.Get(c2pScheme) for i, envP := range []*string{&envconfig.XDSBootstrapFileName, &envconfig.XDSBootstrapFileContent} { @@ -118,6 +134,7 @@ func (s) TestBuildWithBootstrapEnvSet(t *testing.T) { func (s) TestBuildNotOnGCE(t *testing.T) { replaceResolvers(t) simulateRunningOnGCE(t, false) + useCleanUniverseDomain(t) builder := resolver.Get(c2pScheme) // Build the google-c2p resolver. @@ -152,6 +169,7 @@ func bootstrapConfig(t *testing.T, opts bootstrap.ConfigOptionsForTesting) *boot func (s) TestBuildXDS(t *testing.T) { replaceResolvers(t) simulateRunningOnGCE(t, true) + useCleanUniverseDomain(t) builder := resolver.Get(c2pScheme) // Override the zone returned by the metadata server. @@ -295,6 +313,7 @@ func (s) TestBuildXDS(t *testing.T) { // google-c2p scheme with a non-empty authority and verifies that it fails with // an expected error. 
func (s) TestBuildFailsWhenCalledWithAuthority(t *testing.T) { + useCleanUniverseDomain(t) uri := "google-c2p://an-authority/resource" cc, err := grpc.Dial(uri, grpc.WithTransportCredentials(insecure.NewCredentials())) defer func() { @@ -307,3 +326,188 @@ func (s) TestBuildFailsWhenCalledWithAuthority(t *testing.T) { t.Fatalf("grpc.Dial(%s) returned error: %v, want: %v", uri, err, wantErr) } } + +func (s) TestSetUniverseDomainNonDefault(t *testing.T) { + replaceResolvers(t) + simulateRunningOnGCE(t, true) + useCleanUniverseDomain(t) + builder := resolver.Get(c2pScheme) + + // Override the zone returned by the metadata server. + oldGetZone := getZone + getZone = func(time.Duration) string { return "test-zone" } + defer func() { getZone = oldGetZone }() + + // Override IPv6 capability returned by the metadata server. + oldGetIPv6Capability := getIPv6Capable + getIPv6Capable = func(time.Duration) bool { return false } + defer func() { getIPv6Capable = oldGetIPv6Capability }() + + // Override the random func used in the node ID. + origRandInd := randInt + randInt = func() int { return 666 } + defer func() { randInt = origRandInd }() + + // Set the universe domain + testUniverseDomain := "test-universe-domain.test" + if err := SetUniverseDomain(testUniverseDomain); err != nil { + t.Fatalf("SetUniverseDomain(%s) failed: %v", testUniverseDomain, err) + } + + // Now set universe domain to something different, it should fail + domain := "test-universe-domain-2.test" + err := SetUniverseDomain(domain) + wantErr := "already set" + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("googlec2p.SetUniverseDomain(%s) returned error: %v, want: %v", domain, err, wantErr) + } + + // Now explicitly set universe domain to the default, it should also fail + domain = "googleapis.com" + err = SetUniverseDomain(domain) + wantErr = "already set" + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("googlec2p.SetUniverseDomain(%s) returned error: %v, want: %v", domain, err, wantErr) + } + + // Now set universe domain to the original value, it should work + if err := SetUniverseDomain(testUniverseDomain); err != nil { + t.Fatalf("googlec2p.SetUniverseDomain(%s) failed: %v", testUniverseDomain, err) + } + + // Build the google-c2p resolver. + r, err := builder.Build(resolver.Target{}, nil, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("failed to build resolver: %v", err) + } + defer r.Close() + + // Build should return xDS, not DNS. + if r != testXDSResolver { + t.Fatalf("Build() returned %#v, want xds resolver", r) + } + + gotConfig, err := bootstrap.GetConfiguration() + if err != nil { + t.Fatalf("Failed to get bootstrap config: %v", err) + } + + // Check that we use directpath-pa.test-universe-domain.test in the + // bootstrap config. 
+ wantBootstrapConfig := bootstrapConfig(t, bootstrap.ConfigOptionsForTesting{ + Servers: []byte(`[{ + "server_uri": "dns:///directpath-pa.test-universe-domain.test", + "channel_creds": [{"type": "google_default"}], + "server_features": ["ignore_resource_deletion"] + }]`), + Authorities: map[string]json.RawMessage{ + "traffic-director-c2p.xds.googleapis.com": []byte(`{ + "xds_servers": [ + { + "server_uri": "dns:///directpath-pa.test-universe-domain.test", + "channel_creds": [{"type": "google_default"}], + "server_features": ["ignore_resource_deletion"] + } + ] + }`), + }, + Node: []byte(`{ + "id": "C2P-666", + "locality": {"zone": "test-zone"} + }`), + }) + if diff := cmp.Diff(wantBootstrapConfig, gotConfig); diff != "" { + t.Fatalf("Unexpected diff in bootstrap config (-want +got):\n%s", diff) + } +} + +func (s) TestDefaultUniverseDomain(t *testing.T) { + replaceResolvers(t) + simulateRunningOnGCE(t, true) + useCleanUniverseDomain(t) + builder := resolver.Get(c2pScheme) + + // Override the zone returned by the metadata server. + oldGetZone := getZone + getZone = func(time.Duration) string { return "test-zone" } + defer func() { getZone = oldGetZone }() + + // Override IPv6 capability returned by the metadata server. + oldGetIPv6Capability := getIPv6Capable + getIPv6Capable = func(time.Duration) bool { return false } + defer func() { getIPv6Capable = oldGetIPv6Capability }() + + // Override the random func used in the node ID. + origRandInd := randInt + randInt = func() int { return 666 } + defer func() { randInt = origRandInd }() + + // Build the google-c2p resolver. + r, err := builder.Build(resolver.Target{}, nil, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("failed to build resolver: %v", err) + } + defer r.Close() + + // Build should return xDS, not DNS. 
+ if r != testXDSResolver { + t.Fatalf("Build() returned %#v, want xds resolver", r) + } + + gotConfig, err := bootstrap.GetConfiguration() + if err != nil { + t.Fatalf("Failed to get bootstrap config: %v", err) + } + + // Check that we use directpath-pa.googleapis.com in the bootstrap config + wantBootstrapConfig := bootstrapConfig(t, bootstrap.ConfigOptionsForTesting{ + Servers: []byte(`[{ + "server_uri": "dns:///directpath-pa.googleapis.com", + "channel_creds": [{"type": "google_default"}], + "server_features": ["ignore_resource_deletion"] + }]`), + Authorities: map[string]json.RawMessage{ + "traffic-director-c2p.xds.googleapis.com": []byte(`{ + "xds_servers": [ + { + "server_uri": "dns:///directpath-pa.googleapis.com", + "channel_creds": [{"type": "google_default"}], + "server_features": ["ignore_resource_deletion"] + } + ] + }`), + }, + Node: []byte(`{ + "id": "C2P-666", + "locality": {"zone": "test-zone"} + }`), + }) + if diff := cmp.Diff(wantBootstrapConfig, gotConfig); diff != "" { + t.Fatalf("Unexpected diff in bootstrap config (-want +got):\n%s", diff) + } + + // Now set universe domain to something different than the default, it should fail + domain := "test-universe-domain.test" + err = SetUniverseDomain(domain) + wantErr := "already set" + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("googlec2p.SetUniverseDomain(%s) returned error: %v, want: %v", domain, err, wantErr) + } + + // Now explicitly set universe domain to the default, it should work + domain = "googleapis.com" + if err := SetUniverseDomain(domain); err != nil { + t.Fatalf("googlec2p.SetUniverseDomain(%s) failed: %v", domain, err) + } +} + +func (s) TestSetUniverseDomainEmptyString(t *testing.T) { + replaceResolvers(t) + simulateRunningOnGCE(t, true) + useCleanUniverseDomain(t) + wantErr := "cannot be empty" + err := SetUniverseDomain("") + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("googlec2p.SetUniverseDomain(\"\") returned error: %v, want: %v", err, wantErr) + } +} From c538c3115071d6d806ab3f4098838590ff6d9a8f Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Tue, 22 Oct 2024 22:34:38 +0530 Subject: [PATCH 19/57] vet: Don't use GOROOT to set PATH if GOROOT is unset (#7761) --- scripts/vet.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/vet.sh b/scripts/vet.sh index aba59a5995bc..1d9991a55834 100755 --- a/scripts/vet.sh +++ b/scripts/vet.sh @@ -21,7 +21,10 @@ cleanup() { } trap cleanup EXIT -PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +if [ -n "${GOROOT}" ]; then + PATH="${GOROOT}/bin:${PATH}" +fi +PATH="${HOME}/go/bin:${PATH}" go version if [[ "$1" = "-install" ]]; then From 80937a99d53cf90edd42db604068cd6030bf0398 Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Tue, 22 Oct 2024 22:58:16 +0530 Subject: [PATCH 20/57] credentials: Apply defaults to TLS configs provided through GetConfigForClient (#7754) --- credentials/tls.go | 29 ++- credentials/tls_ext_test.go | 394 +++++++++++++++++++++++++++--------- 2 files changed, 324 insertions(+), 99 deletions(-) diff --git a/credentials/tls.go b/credentials/tls.go index 4114358545ef..e163a473df93 100644 --- a/credentials/tls.go +++ b/credentials/tls.go @@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. 
func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/credentials/tls_ext_test.go b/credentials/tls_ext_test.go index c817777b2f89..22881a6f497a 100644 --- a/credentials/tls_ext_test.go +++ b/credentials/tls_ext_test.go @@ -79,43 +79,86 @@ func (s) TestTLS_MinVersion12(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - // Create server creds without a minimum version. - serverCreds := credentials.NewTLS(&tls.Config{ - // MinVersion should be set to 1.2 by gRPC by default. - Certificates: []tls.Certificate{serverCert}, - }) - ss := stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil + testCases := []struct { + name string + serverTLS func() *tls.Config + }{ + { + name: "base_case", + serverTLS: func() *tls.Config { + return &tls.Config{ + // MinVersion should be set to 1.2 by gRPC by default. + Certificates: []tls.Certificate{serverCert}, + } + }, + }, + { + name: "fallback_to_base", + serverTLS: func() *tls.Config { + config := &tls.Config{ + // MinVersion should be set to 1.2 by gRPC by default. + Certificates: []tls.Certificate{serverCert}, + } + config.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { + return nil, nil + } + return config + }, + }, + { + name: "dynamic_using_get_config_for_client", + serverTLS: func() *tls.Config { + return &tls.Config{ + GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) { + return &tls.Config{ + // MinVersion should be set to 1.2 by gRPC by default. + Certificates: []tls.Certificate{serverCert}, + }, nil + }, + } + }, }, } - // Create client creds that supports V1.0-V1.1. 
- clientCreds := credentials.NewTLS(&tls.Config{ - ServerName: serverName, - RootCAs: certPool, - MinVersion: tls.VersionTLS10, - MaxVersion: tls.VersionTLS11, - }) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create server creds without a minimum version. + serverCreds := credentials.NewTLS(tc.serverTLS()) + ss := stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } - // Start server and client separately, because Start() blocks on a - // successful connection, which we will not get. - if err := ss.StartServer(grpc.Creds(serverCreds)); err != nil { - t.Fatalf("Error starting server: %v", err) - } - defer ss.Stop() + // Create client creds that supports V1.0-V1.1. + clientCreds := credentials.NewTLS(&tls.Config{ + ServerName: serverName, + RootCAs: certPool, + MinVersion: tls.VersionTLS10, + MaxVersion: tls.VersionTLS11, + }) - cc, err := grpc.NewClient(ss.Address, grpc.WithTransportCredentials(clientCreds)) - if err != nil { - t.Fatalf("grpc.NewClient error: %v", err) - } - defer cc.Close() + // Start server and client separately, because Start() blocks on a + // successful connection, which we will not get. + if err := ss.StartServer(grpc.Creds(serverCreds)); err != nil { + t.Fatalf("Error starting server: %v", err) + } + defer ss.Stop() - client := testgrpc.NewTestServiceClient(cc) + cc, err := grpc.NewClient(ss.Address, grpc.WithTransportCredentials(clientCreds)) + if err != nil { + t.Fatalf("grpc.NewClient error: %v", err) + } + defer cc.Close() - const wantStr = "authentication handshake failed" - if _, err = client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable || !strings.Contains(status.Convert(err).Message(), wantStr) { - t.Fatalf("EmptyCall err = %v; want code=%v, message contains %q", err, codes.Unavailable, wantStr) + client := testgrpc.NewTestServiceClient(cc) + + const wantStr = "authentication handshake failed" + if _, err = client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable || !strings.Contains(status.Convert(err).Message(), wantStr) { + t.Fatalf("EmptyCall err = %v; want code=%v, message contains %q", err, codes.Unavailable, wantStr) + } + + }) } } @@ -129,35 +172,78 @@ func (s) TestTLS_MinVersionOverridable(t *testing.T) { for _, cs := range tls.CipherSuites() { allCipherSuites = append(allCipherSuites, cs.ID) } - - // Create server creds that allow v1.0. 
- serverCreds := credentials.NewTLS(&tls.Config{ - MinVersion: tls.VersionTLS10, - Certificates: []tls.Certificate{serverCert}, - CipherSuites: allCipherSuites, - }) - ss := stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil + testCases := []struct { + name string + serverTLS func() *tls.Config + }{ + { + name: "base_case", + serverTLS: func() *tls.Config { + return &tls.Config{ + MinVersion: tls.VersionTLS10, + Certificates: []tls.Certificate{serverCert}, + CipherSuites: allCipherSuites, + } + }, + }, + { + name: "fallback_to_base", + serverTLS: func() *tls.Config { + config := &tls.Config{ + MinVersion: tls.VersionTLS10, + Certificates: []tls.Certificate{serverCert}, + CipherSuites: allCipherSuites, + } + config.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { + return nil, nil + } + return config + }, + }, + { + name: "dynamic_using_get_config_for_client", + serverTLS: func() *tls.Config { + return &tls.Config{ + GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) { + return &tls.Config{ + MinVersion: tls.VersionTLS10, + Certificates: []tls.Certificate{serverCert}, + CipherSuites: allCipherSuites, + }, nil + }, + } + }, }, } - // Create client creds that supports V1.0-V1.1. - clientCreds := credentials.NewTLS(&tls.Config{ - ServerName: serverName, - RootCAs: certPool, - CipherSuites: allCipherSuites, - MinVersion: tls.VersionTLS10, - MaxVersion: tls.VersionTLS11, - }) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create server creds that allow v1.0. + serverCreds := credentials.NewTLS(tc.serverTLS()) + ss := stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } - if err := ss.Start([]grpc.ServerOption{grpc.Creds(serverCreds)}, grpc.WithTransportCredentials(clientCreds)); err != nil { - t.Fatalf("Error starting stub server: %v", err) - } - defer ss.Stop() + // Create client creds that supports V1.0-V1.1. + clientCreds := credentials.NewTLS(&tls.Config{ + ServerName: serverName, + RootCAs: certPool, + CipherSuites: allCipherSuites, + MinVersion: tls.VersionTLS10, + MaxVersion: tls.VersionTLS11, + }) - if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall err = %v; want ", err) + if err := ss.Start([]grpc.ServerOption{grpc.Creds(serverCreds)}, grpc.WithTransportCredentials(clientCreds)); err != nil { + t.Fatalf("Error starting stub server: %v", err) + } + defer ss.Stop() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall err = %v; want ", err) + } + }) } } @@ -165,43 +251,82 @@ func (s) TestTLS_MinVersionOverridable(t *testing.T) { func (s) TestTLS_CipherSuites(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - - // Create server creds without cipher suites. 
- serverCreds := credentials.NewTLS(&tls.Config{ - Certificates: []tls.Certificate{serverCert}, - }) - ss := stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil + testCases := []struct { + name string + serverTLS func() *tls.Config + }{ + { + name: "base_case", + serverTLS: func() *tls.Config { + return &tls.Config{ + Certificates: []tls.Certificate{serverCert}, + } + }, + }, + { + name: "fallback_to_base", + serverTLS: func() *tls.Config { + config := &tls.Config{ + Certificates: []tls.Certificate{serverCert}, + } + config.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { + return nil, nil + } + return config + }, + }, + { + name: "dynamic_using_get_config_for_client", + serverTLS: func() *tls.Config { + return &tls.Config{ + GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) { + return &tls.Config{ + Certificates: []tls.Certificate{serverCert}, + }, nil + }, + } + }, }, } - // Create client creds that use a forbidden suite only. - clientCreds := credentials.NewTLS(&tls.Config{ - ServerName: serverName, - RootCAs: certPool, - CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, - MaxVersion: tls.VersionTLS12, // TLS1.3 cipher suites are not configurable, so limit to 1.2. - }) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create server creds without cipher suites. + serverCreds := credentials.NewTLS(tc.serverTLS()) + ss := stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } - // Start server and client separately, because Start() blocks on a - // successful connection, which we will not get. - if err := ss.StartServer(grpc.Creds(serverCreds)); err != nil { - t.Fatalf("Error starting server: %v", err) - } - defer ss.Stop() + // Create client creds that use a forbidden suite only. + clientCreds := credentials.NewTLS(&tls.Config{ + ServerName: serverName, + RootCAs: certPool, + CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, + MaxVersion: tls.VersionTLS12, // TLS1.3 cipher suites are not configurable, so limit to 1.2. + }) - cc, err := grpc.NewClient("dns:"+ss.Address, grpc.WithTransportCredentials(clientCreds)) - if err != nil { - t.Fatalf("grpc.NewClient error: %v", err) - } - defer cc.Close() + // Start server and client separately, because Start() blocks on a + // successful connection, which we will not get. 
+ if err := ss.StartServer(grpc.Creds(serverCreds)); err != nil { + t.Fatalf("Error starting server: %v", err) + } + defer ss.Stop() - client := testgrpc.NewTestServiceClient(cc) + cc, err := grpc.NewClient("dns:"+ss.Address, grpc.WithTransportCredentials(clientCreds)) + if err != nil { + t.Fatalf("grpc.NewClient error: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) - const wantStr = "authentication handshake failed" - if _, err = client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable || !strings.Contains(status.Convert(err).Message(), wantStr) { - t.Fatalf("EmptyCall err = %v; want code=%v, message contains %q", err, codes.Unavailable, wantStr) + const wantStr = "authentication handshake failed" + if _, err = client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable || !strings.Contains(status.Convert(err).Message(), wantStr) { + t.Fatalf("EmptyCall err = %v; want code=%v, message contains %q", err, codes.Unavailable, wantStr) + } + }) } } @@ -210,23 +335,108 @@ func (s) TestTLS_CipherSuitesOverridable(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - // Create server that allows only a forbidden cipher suite. + testCases := []struct { + name string + serverTLS func() *tls.Config + }{ + { + name: "base_case", + serverTLS: func() *tls.Config { + return &tls.Config{ + Certificates: []tls.Certificate{serverCert}, + CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, + } + }, + }, + { + name: "fallback_to_base", + serverTLS: func() *tls.Config { + config := &tls.Config{ + Certificates: []tls.Certificate{serverCert}, + CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, + } + config.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { + return nil, nil + } + return config + }, + }, + { + name: "dynamic_using_get_config_for_client", + serverTLS: func() *tls.Config { + return &tls.Config{ + GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) { + return &tls.Config{ + Certificates: []tls.Certificate{serverCert}, + CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, + }, nil + }, + } + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create server that allows only a forbidden cipher suite. + serverCreds := credentials.NewTLS(tc.serverTLS()) + ss := stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + // Create server that allows only a forbidden cipher suite. + clientCreds := credentials.NewTLS(&tls.Config{ + ServerName: serverName, + RootCAs: certPool, + CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, + MaxVersion: tls.VersionTLS12, // TLS1.3 cipher suites are not configurable, so limit to 1.2. + }) + + if err := ss.Start([]grpc.ServerOption{grpc.Creds(serverCreds)}, grpc.WithTransportCredentials(clientCreds)); err != nil { + t.Fatalf("Error starting stub server: %v", err) + } + defer ss.Stop() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall err = %v; want ", err) + } + }) + } +} + +// TestTLS_ServerConfiguresALPNByDefault verifies that ALPN is configured +// correctly for a server that doesn't specify the NextProtos field and uses +// GetConfigForClient to provide the TLS config during the handshake. 
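// Illustrative sketch (hedged; the certificate file names are placeholders,
// not taken from this patch): with defaults now applied to configs returned
// from GetConfigForClient, a server can supply its TLS config lazily, for
// example to rotate certificates, and still get gRPC's defaults (h2 ALPN,
// MinVersion 1.2, curated cipher suites) applied to the returned config:
//
//	creds := credentials.NewTLS(&tls.Config{
//		GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) {
//			cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
//			if err != nil {
//				return nil, err
//			}
//			return &tls.Config{Certificates: []tls.Certificate{cert}}, nil
//		},
//	})
//	srv := grpc.NewServer(grpc.Creds(creds))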
+func (s) TestTLS_ServerConfiguresALPNByDefault(t *testing.T) { + initialVal := envconfig.EnforceALPNEnabled + defer func() { + envconfig.EnforceALPNEnabled = initialVal + }() + envconfig.EnforceALPNEnabled = true + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Create a server that doesn't set the NextProtos field. serverCreds := credentials.NewTLS(&tls.Config{ - Certificates: []tls.Certificate{serverCert}, - CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, + GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) { + return &tls.Config{ + Certificates: []tls.Certificate{serverCert}, + }, nil + }, }) + ss := stubserver.StubServer{ EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, } - // Create server that allows only a forbidden cipher suite. clientCreds := credentials.NewTLS(&tls.Config{ - ServerName: serverName, - RootCAs: certPool, - CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}, - MaxVersion: tls.VersionTLS12, // TLS1.3 cipher suites are not configurable, so limit to 1.2. + ServerName: serverName, + RootCAs: certPool, }) if err := ss.Start([]grpc.ServerOption{grpc.Creds(serverCreds)}, grpc.WithTransportCredentials(clientCreds)); err != nil { From 4bb0170ac65f2726d593eb7364a807b171fa1d63 Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Tue, 22 Oct 2024 23:16:16 +0530 Subject: [PATCH 21/57] status: Fix status incompatibility introduced by #6919 and move non-regeneratable proto code into /testdata (#7724) --- go.mod | 1 + go.sum | 2 + internal/status/status.go | 35 +++++++- interop/xds/go.sum | 2 + reflection/test/go.mod | 18 ---- reflection/test/go.sum | 14 ---- reflection/test/serverreflection_test.go | 2 +- scripts/regenerate.sh | 12 +-- scripts/vet.sh | 10 +-- security/advancedtls/examples/go.sum | 2 + security/advancedtls/go.sum | 2 + stats/opencensus/go.sum | 2 + stats/opentelemetry/go.sum | 2 + status/status_ext_test.go | 56 +++++++++++++ .../grpc_testing_not_regenerated}/README.md | 4 + .../grpc_testing_not_regenerated}/dynamic.go | 2 +- .../dynamic.proto | 4 +- .../grpc_testing_not_regenerated/simple.proto | 27 ++++++ .../simple_message_v1.go | 83 +++++++++++++++++++ .../grpc_testing_not_regenerated}/testv3.go | 4 +- .../testv3.proto | 4 +- 21 files changed, 236 insertions(+), 52 deletions(-) delete mode 100644 reflection/test/go.mod delete mode 100644 reflection/test/go.sum rename {reflection/test/grpc_testing_not_regenerate => testdata/grpc_testing_not_regenerated}/README.md (69%) rename {reflection/test/grpc_testing_not_regenerate => testdata/grpc_testing_not_regenerated}/dynamic.go (98%) rename {reflection/test/grpc_testing_not_regenerate => testdata/grpc_testing_not_regenerated}/dynamic.proto (91%) create mode 100644 testdata/grpc_testing_not_regenerated/simple.proto create mode 100644 testdata/grpc_testing_not_regenerated/simple_message_v1.go rename {reflection/test/grpc_testing_not_regenerate => testdata/grpc_testing_not_regenerated}/testv3.go (99%) rename {reflection/test/grpc_testing_not_regenerate => testdata/grpc_testing_not_regenerated}/testv3.proto (93%) diff --git a/go.mod b/go.mod index d87907da3855..80f755087ca0 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 github.com/envoyproxy/go-control-plane v0.13.1 github.com/golang/glog v1.2.2 + github.com/golang/protobuf v1.5.4 github.com/google/go-cmp v0.6.0 
github.com/google/uuid v1.6.0 golang.org/x/net v0.30.0 diff --git a/go.sum b/go.sum index c62416214d25..8ed7162313b9 100644 --- a/go.sum +++ b/go.sum @@ -16,6 +16,8 @@ github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6 github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= diff --git a/internal/status/status.go b/internal/status/status.go index 757925381fe7..1186f1e9a9ad 100644 --- a/internal/status/status.go +++ b/internal/status/status.go @@ -149,6 +149,8 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. +// If the detail can be decoded, the proto message returned is of the same +// type that was given to WithDetails(). func (s *Status) Details() []any { if s == nil || s.s == nil { return nil @@ -160,7 +162,38 @@ func (s *Status) Details() []any { details = append(details, err) continue } - details = append(details, detail) + // The call to MessageV1Of is required to unwrap the proto message if + // it implemented only the MessageV1 API. The proto message would have + // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are + // added to a global registry used by any.UnmarshalNew(). + // MessageV1Of has the following behaviour: + // 1. If the given message is a wrapped MessageV1, it returns the + // unwrapped value. + // 2. If the given message already implements MessageV1, it returns it + // as is. + // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper. + // + // Since the Status.WithDetails() API only accepts MessageV1, calling + // MessageV1Of ensures we return the same type that was given to + // WithDetails: + // * If the give type implemented only MessageV1, the unwrapping from + // point 1 above will restore the type. + // * If the given type implemented both MessageV1 and MessageV2, point 2 + // above will ensure no wrapping is performed. + // * If the given type implemented only MessageV2 and was wrapped using + // MessageV1Of before passing to WithDetails(), it would be unwrapped + // in WithDetails by calling MessageV2Of(). Point 3 above will ensure + // that the type is wrapped in a MessageV1 wrapper again before + // returning. Note that protoc-gen-go doesn't generate code which + // implements ONLY MessageV2 at the time of writing. + // + // NOTE: Status details can also be added using the FromProto method. + // This could theoretically allow passing a Detail message that only + // implements the V2 API. In such a case the message will be wrapped in + // a MessageV1 wrapper when fetched using Details(). + // Since protoc-gen-go generates only code that implements both V1 and + // V2 APIs for backward compatibility, this is not a concern. 
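	// Concrete illustration (example type chosen for exposition, not part of
	// this change): a detail passed to WithDetails() as *errdetails.RetryInfo,
	// generated code that implements both APIs, is returned by Details() as
	// *errdetails.RetryInfo (point 2 above); a legacy MessageV1-only type is
	// returned as its original concrete type because the wrapper is removed
	// here (point 1 above).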
+ details = append(details, protoadapt.MessageV1Of(detail)) } return details } diff --git a/interop/xds/go.sum b/interop/xds/go.sum index ef695b391a3d..16f77351e6eb 100644 --- a/interop/xds/go.sum +++ b/interop/xds/go.sum @@ -23,6 +23,8 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= diff --git a/reflection/test/go.mod b/reflection/test/go.mod deleted file mode 100644 index 7382e0e9842e..000000000000 --- a/reflection/test/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module google.golang.org/grpc/reflection/test - -go 1.22.7 - -replace google.golang.org/grpc => ../../ - -require ( - github.com/golang/protobuf v1.5.4 - google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.35.1 -) - -require ( - golang.org/x/net v0.30.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect -) diff --git a/reflection/test/go.sum b/reflection/test/go.sum deleted file mode 100644 index c1616371c89e..000000000000 --- a/reflection/test/go.sum +++ /dev/null @@ -1,14 +0,0 @@ -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/reflection/test/serverreflection_test.go b/reflection/test/serverreflection_test.go index 29698c604a14..0a436f2aa8e8 100644 --- a/reflection/test/serverreflection_test.go +++ b/reflection/test/serverreflection_test.go @@ -43,7 +43,7 @@ import ( v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" pb "google.golang.org/grpc/reflection/grpc_testing" - pbv3 
"google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate" + pbv3 "google.golang.org/grpc/testdata/grpc_testing_not_regenerated" ) var ( diff --git a/scripts/regenerate.sh b/scripts/regenerate.sh index 282774b1d1de..55fa279daf30 100755 --- a/scripts/regenerate.sh +++ b/scripts/regenerate.sh @@ -26,9 +26,9 @@ export PATH="${GOBIN}:${PATH}" mkdir -p "${GOBIN}" echo "removing existing generated files..." -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate' | xargs rm -f || true +# grpc_testing_not_regenerated/*.pb.go is not re-generated, +# see grpc_testing_not_regenerated/README.md for details. +find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerated' | xargs rm -f || true echo "Executing: go install google.golang.org/protobuf/cmd/protoc-gen-go..." (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -124,8 +124,8 @@ done mkdir -p "${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1" mv "${WORKDIR}"/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* "${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1" -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm "${WORKDIR}"/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go +# grpc_testing_not_regenerated/*.pb.go are not re-generated, +# see grpc_testing_not_regenerated/README.md for details. +rm "${WORKDIR}"/out/google.golang.org/grpc/testdata/grpc_testing_not_regenerated/*.pb.go cp -R "${WORKDIR}"/out/google.golang.org/grpc/* . diff --git a/scripts/vet.sh b/scripts/vet.sh index 1d9991a55834..0d2b8b8f42ba 100755 --- a/scripts/vet.sh +++ b/scripts/vet.sh @@ -59,13 +59,13 @@ git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Exampl git grep -l 'time.After(' -- "*.go" | not grep -v '_test.go\|test_utils\|testutils' # - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerated' # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go\|^internal/grpclog/prefix_logger.go' # - Ensure that the deprecated protobuf dependency is not used. -not git grep "\"github.com/golang/protobuf/*" -- "*.go" ':(exclude)reflection/test/grpc_testing_not_regenerate/*' +not git grep "\"github.com/golang/protobuf/*" -- "*.go" ':(exclude)testdata/grpc_testing_not_regenerated/*' # - Ensure all usages of grpc_testing package are renamed when importing. not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" @@ -112,7 +112,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do noret_grep -v "(ST1000)" "${SC_OUT}" | noret_grep -v "(SA1019)" | noret_grep -v "(ST1003)" | noret_grep -v "(ST1019)\|\(other import of\)" | not grep -v "(SA4000)" # Exclude underscore checks for generated code. 
- noret_grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' + noret_grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerated\)' # Error for duplicate imports not including grpc protos. noret_grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused @@ -149,7 +149,7 @@ XXXXX PleaseIgnoreUnused' XXXXX Protobuf related deprecation errors: "github.com/golang/protobuf .pb.go: -grpc_testing_not_regenerate +grpc_testing_not_regenerated : ptypes. proto.RegisterType XXXXX gRPC internal usage deprecation errors: @@ -191,7 +191,7 @@ done # Error for violation of enabled lint rules in config excluding generated code. revive \ -set_exit_status=1 \ - -exclude "reflection/test/grpc_testing_not_regenerate/" \ + -exclude "testdata/grpc_testing_not_regenerated/" \ -exclude "**/*.pb.go" \ -formatter plain \ -config "$(dirname "$0")/revive.toml" \ diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index d43932726ef0..9102af782ca0 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -1,3 +1,5 @@ +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index d43932726ef0..9102af782ca0 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,3 +1,5 @@ +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index d382006665f0..f545f284c333 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -859,6 +859,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= diff --git a/stats/opentelemetry/go.sum b/stats/opentelemetry/go.sum index 14411e4f49c3..b2d047c8cb0f 100644 --- a/stats/opentelemetry/go.sum +++ b/stats/opentelemetry/go.sum @@ -21,6 +21,8 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= diff --git a/status/status_ext_test.go b/status/status_ext_test.go index 659d10d0deb0..118f678924e3 100644 --- a/status/status_ext_test.go +++ b/status/status_ext_test.go @@ -21,6 +21,7 @@ package status_test import ( "context" "errors" + "reflect" "strings" "testing" "time" @@ -35,8 +36,10 @@ import ( "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/testing/protocmp" testpb "google.golang.org/grpc/interop/grpc_testing" + tpb "google.golang.org/grpc/testdata/grpc_testing_not_regenerated" ) const defaultTestTimeout = 10 * time.Second @@ -203,3 +206,56 @@ func (s) TestStatusDetails(t *testing.T) { }) } } + +// TestStatus_ErrorDetailsMessageV1 verifies backward compatibility of the +// status.Details() method when using protobuf code generated with only the +// MessageV1 API implementation. +func (s) TestStatus_ErrorDetailsMessageV1(t *testing.T) { + details := []protoadapt.MessageV1{ + &tpb.SimpleMessage{Data: "abc"}, + } + s, err := status.New(codes.Aborted, "").WithDetails(details...) + if err != nil { + t.Fatalf("(%v).WithDetails(%+v) failed: %v", s, details, err) + } + gotDetails := s.Details() + for i, msg := range gotDetails { + if got, want := reflect.TypeOf(msg), reflect.TypeOf(details[i]); got != want { + t.Errorf("reflect.Typeof(%v) = %v, want = %v", msg, got, want) + } + if _, ok := msg.(protoadapt.MessageV1); !ok { + t.Errorf("(%v).Details() returned message that doesn't implement protoadapt.MessageV1: %v", s, msg) + } + if diff := cmp.Diff(msg, details[i], protocmp.Transform()); diff != "" { + t.Errorf("(%v).Details got unexpected output, diff (-got +want):\n%s", s, diff) + } + } +} + +// TestStatus_ErrorDetailsMessageV1AndV2 verifies that status.Details() method +// returns the same message types when using protobuf code generated with both the +// MessageV1 and MessageV2 API implementations. +func (s) TestStatus_ErrorDetailsMessageV1AndV2(t *testing.T) { + details := []protoadapt.MessageV1{ + &testpb.Empty{}, + } + s, err := status.New(codes.Aborted, "").WithDetails(details...) 
+	if err != nil {
+		t.Fatalf("(%v).WithDetails(%+v) failed: %v", s, details, err)
+	}
+	gotDetails := s.Details()
+	for i, msg := range gotDetails {
+		if got, want := reflect.TypeOf(msg), reflect.TypeOf(details[i]); got != want {
+			t.Errorf("reflect.Typeof(%v) = %v, want = %v", msg, got, want)
+		}
+		if _, ok := msg.(protoadapt.MessageV1); !ok {
+			t.Errorf("(%v).Details() returned message that doesn't implement protoadapt.MessageV1: %v", s, msg)
+		}
+		if _, ok := msg.(protoadapt.MessageV2); !ok {
+			t.Errorf("(%v).Details() returned message that doesn't implement protoadapt.MessageV2: %v", s, msg)
+		}
+		if diff := cmp.Diff(msg, details[i], protocmp.Transform()); diff != "" {
+			t.Errorf("(%v).Details got unexpected output, diff (-got +want):\n%s", s, diff)
+		}
+	}
+}
diff --git a/reflection/test/grpc_testing_not_regenerate/README.md b/testdata/grpc_testing_not_regenerated/README.md
similarity index 69%
rename from reflection/test/grpc_testing_not_regenerate/README.md
rename to testdata/grpc_testing_not_regenerated/README.md
index 34e164662fa6..8ff72f054bff 100644
--- a/reflection/test/grpc_testing_not_regenerate/README.md
+++ b/testdata/grpc_testing_not_regenerated/README.md
@@ -6,3 +6,7 @@ with `"context"`.
 
 `dynamic.go` was generated with a newer protoc and manually edited to remove
 everything except the descriptor bytes var, which is renamed and exported.
+
+`simple_message_v1.go` was generated using protoc-gen-go v1.3.5, which doesn't
+support the MessageV2 API. As a result, the generated code implements only the
+old MessageV1 API.
diff --git a/reflection/test/grpc_testing_not_regenerate/dynamic.go b/testdata/grpc_testing_not_regenerated/dynamic.go
similarity index 98%
rename from reflection/test/grpc_testing_not_regenerate/dynamic.go
rename to testdata/grpc_testing_not_regenerated/dynamic.go
index 35e4f02478b2..e75dbea73f8d 100644
--- a/reflection/test/grpc_testing_not_regenerate/dynamic.go
+++ b/testdata/grpc_testing_not_regenerated/dynamic.go
@@ -15,7 +15,7 @@
  *
  */
 
-package grpc_testing_not_regenerate
+package grpc_testing_not_regenerated
 
 // FileDynamicProtoRawDesc is the descriptor for dynamic.proto, see README.md.
 var FileDynamicProtoRawDesc = []byte{
diff --git a/reflection/test/grpc_testing_not_regenerate/dynamic.proto b/testdata/grpc_testing_not_regenerated/dynamic.proto
similarity index 91%
rename from reflection/test/grpc_testing_not_regenerate/dynamic.proto
rename to testdata/grpc_testing_not_regenerated/dynamic.proto
index 6b6b0a72a589..72e491563446 100644
--- a/reflection/test/grpc_testing_not_regenerate/dynamic.proto
+++ b/testdata/grpc_testing_not_regenerated/dynamic.proto
@@ -17,10 +17,10 @@
 
 syntax = "proto3";
 
-option go_package = "google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate";
-
 package grpc.testing;
 
+option go_package = "google.golang.org/grpc/testdata/grpc_testing_not_regenerated";
+
 message DynamicRes {}
 
 message DynamicReq {}
diff --git a/testdata/grpc_testing_not_regenerated/simple.proto b/testdata/grpc_testing_not_regenerated/simple.proto
new file mode 100644
index 000000000000..c8b13c779eb1
--- /dev/null
+++ b/testdata/grpc_testing_not_regenerated/simple.proto
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +syntax = "proto3"; + +package grpc.testdata.grpc_testing_not_regenerated; + +option go_package = "google.golang.org/grpc/testdata/grpc_testing_not_regenerated"; + +// SimpleMessage is used to hold string data. +message SimpleMessage { + string data = 1; +} diff --git a/testdata/grpc_testing_not_regenerated/simple_message_v1.go b/testdata/grpc_testing_not_regenerated/simple_message_v1.go new file mode 100644 index 000000000000..7bf9ee2b8892 --- /dev/null +++ b/testdata/grpc_testing_not_regenerated/simple_message_v1.go @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: simple.proto + +package grpc_testing_not_regenerated + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// SimpleMessage is used to hold string data. +type SimpleMessage struct { + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleMessage) Reset() { *m = SimpleMessage{} } +func (m *SimpleMessage) String() string { return proto.CompactTextString(m) } +func (*SimpleMessage) ProtoMessage() {} +func (*SimpleMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_5ffd045dd4d042c1, []int{0} +} + +func (m *SimpleMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleMessage.Unmarshal(m, b) +} +func (m *SimpleMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleMessage.Marshal(b, m, deterministic) +} +func (m *SimpleMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleMessage.Merge(m, src) +} +func (m *SimpleMessage) XXX_Size() int { + return xxx_messageInfo_SimpleMessage.Size(m) +} +func (m *SimpleMessage) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_SimpleMessage proto.InternalMessageInfo + +func (m *SimpleMessage) GetData() string { + if m != nil { + return m.Data + } + return "" +} + +func init() { + proto.RegisterType((*SimpleMessage)(nil), "grpc.testdata.grpc_testing_not_regenerated.SimpleMessage") +} + +func init() { + proto.RegisterFile("simple.proto", fileDescriptor_5ffd045dd4d042c1) +} + +var fileDescriptor_5ffd045dd4d042c1 = []byte{ + // 142 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xce, 0xcc, 0x2d, + 0xc8, 0x49, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xd2, 0x4a, 0x2f, 0x2a, 0x48, 0xd6, 0x2b, + 0x49, 0x2d, 0x2e, 0x49, 0x49, 0x2c, 0x49, 0xd4, 0x03, 0xf1, 0xe2, 0x41, 0xbc, 0xcc, 0xbc, 0xf4, + 0xf8, 0xbc, 
0xfc, 0x92, 0xf8, 0xa2, 0xd4, 0xf4, 0xd4, 0xbc, 0xd4, 0xa2, 0xc4, 0x92, 0xd4, 0x14, + 0x25, 0x65, 0x2e, 0xde, 0x60, 0xb0, 0x5e, 0xdf, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x21, 0x21, + 0x2e, 0x16, 0x90, 0x2e, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0xdb, 0xc9, 0x2e, 0xca, + 0x26, 0x3d, 0x3f, 0x3f, 0x3d, 0x27, 0x55, 0x2f, 0x3d, 0x3f, 0x27, 0x31, 0x2f, 0x5d, 0x2f, 0xbf, + 0x28, 0x5d, 0x1f, 0x64, 0xac, 0x3e, 0xcc, 0x12, 0x7d, 0x7c, 0x96, 0x24, 0xb1, 0x81, 0xdd, 0x65, + 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x80, 0xd6, 0x07, 0xa7, 0x00, 0x00, 0x00, +} diff --git a/reflection/test/grpc_testing_not_regenerate/testv3.go b/testdata/grpc_testing_not_regenerated/testv3.go similarity index 99% rename from reflection/test/grpc_testing_not_regenerate/testv3.go rename to testdata/grpc_testing_not_regenerated/testv3.go index 8a690963ec10..7c55c9adb093 100644 --- a/reflection/test/grpc_testing_not_regenerate/testv3.go +++ b/testdata/grpc_testing_not_regenerated/testv3.go @@ -20,7 +20,7 @@ // DO NOT EDIT! /* -Package grpc_testing_not_regenerate is a generated protocol buffer package. +Package grpc_testing_not_regenerated is a generated protocol buffer package. It is generated from these files: @@ -31,7 +31,7 @@ It has these top-level messages: SearchResponseV3 SearchRequestV3 */ -package grpc_testing_not_regenerate +package grpc_testing_not_regenerated import ( context "context" diff --git a/reflection/test/grpc_testing_not_regenerate/testv3.proto b/testdata/grpc_testing_not_regenerated/testv3.proto similarity index 93% rename from reflection/test/grpc_testing_not_regenerate/testv3.proto rename to testdata/grpc_testing_not_regenerated/testv3.proto index 9902ad67964b..c8eee856b5cb 100644 --- a/reflection/test/grpc_testing_not_regenerate/testv3.proto +++ b/testdata/grpc_testing_not_regenerated/testv3.proto @@ -17,10 +17,10 @@ syntax = "proto3"; -option go_package = "google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate"; - package grpc.testingv3; +option go_package = "google.golang.org/grpc/testdata/grpc_testing_not_regenerated"; + message SearchResponseV3 { message Result { string url = 1; From 8212cf0376831ce8b88b824128f113ae6e90b4c8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 23 Oct 2024 09:59:37 -0700 Subject: [PATCH 22/57] xdsclient: implementation of the xdsChannel (#7757) --- xds/internal/xdsclient/authority.go | 2 +- xds/internal/xdsclient/channel.go | 305 ++++++ xds/internal/xdsclient/channel_test.go | 972 ++++++++++++++++++ .../xdsclient/transport/ads/ads_stream.go | 7 + .../xdsresource/cluster_resource_type.go | 4 +- .../xdsresource/endpoints_resource_type.go | 4 +- .../xdsresource/listener_resource_type.go | 4 +- .../xdsclient/xdsresource/resource_type.go | 6 +- .../xdsresource/route_config_resource_type.go | 4 +- 9 files changed, 1296 insertions(+), 12 deletions(-) create mode 100644 xds/internal/xdsclient/channel.go create mode 100644 xds/internal/xdsclient/channel_test.go diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index 668c436fb5fb..e1fce2340e67 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -254,7 +254,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // - this update is different from the one currently cached // - the previous update for this resource was NACKed, but the update // before that was the same as this update. 
- if state.cache == nil || !state.cache.Equal(uErr.resource) || state.md.ErrState != nil { + if state.cache == nil || !state.cache.RawEqual(uErr.resource) || state.md.ErrState != nil { for watcher := range state.watchers { watcher := watcher resource := uErr.resource diff --git a/xds/internal/xdsclient/channel.go b/xds/internal/xdsclient/channel.go new file mode 100644 index 000000000000..4a1e73c943b1 --- /dev/null +++ b/xds/internal/xdsclient/channel.go @@ -0,0 +1,305 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "errors" + "fmt" + "time" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + igrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" + "google.golang.org/grpc/xds/internal/xdsclient/transport/lrs" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// xdsChannelEventHandler wraps callbacks used to notify the xDS client about +// events on the xdsChannel. Methods in this interface may be invoked +// concurrently and the xDS client implementation needs to handle them in a +// thread-safe manner. +type xdsChannelEventHandler interface { + // adsStreamFailure is called when the xdsChannel encounters an ADS stream + // failure. + adsStreamFailure(error) + + // adsResourceUpdate is called when the xdsChannel receives an ADS response + // from the xDS management server. The callback is provided with the + // following: + // - the resource type of the resources in the response + // - a map of resources in the response, keyed by resource name + // - the metadata associated with the response + // - a callback to be invoked when the updated is processed + adsResourceUpdate(xdsresource.Type, map[string]ads.DataAndErrTuple, xdsresource.UpdateMetadata, func()) + + // adsResourceDoesNotExist is called when the xdsChannel determines that a + // requested ADS resource does not exist. + adsResourceDoesNotExist(xdsresource.Type, string) +} + +// xdsChannelOpts holds the options for creating a new xdsChannel. +type xdsChannelOpts struct { + transport transport.Interface // Takes ownership of this transport. + serverConfig *bootstrap.ServerConfig // Configuration of the server to connect to. + bootstrapConfig *bootstrap.Config // Complete bootstrap configuration, used to decode resources. + resourceTypeGetter func(string) xdsresource.Type // Function to retrieve resource parsing functionality, based on resource type. + eventHandler xdsChannelEventHandler // Callbacks for ADS stream events. + backoff func(int) time.Duration // Backoff function to use for stream retries. Defaults to exponential backoff, if unset. + watchExpiryTimeout time.Duration // Timeout for ADS resource watch expiry. 
+ logPrefix string // Prefix to use for logging. +} + +// newXDSChannel creates a new xdsChannel instance with the provided options. +// It performs basic validation on the provided options and initializes the +// xdsChannel with the necessary components. +func newXDSChannel(opts xdsChannelOpts) (*xdsChannel, error) { + switch { + case opts.transport == nil: + return nil, errors.New("xdsChannel: transport is nil") + case opts.serverConfig == nil: + return nil, errors.New("xdsChannel: serverConfig is nil") + case opts.bootstrapConfig == nil: + return nil, errors.New("xdsChannel: bootstrapConfig is nil") + case opts.resourceTypeGetter == nil: + return nil, errors.New("xdsChannel: resourceTypeGetter is nil") + case opts.eventHandler == nil: + return nil, errors.New("xdsChannel: eventHandler is nil") + } + + xc := &xdsChannel{ + transport: opts.transport, + serverConfig: opts.serverConfig, + bootstrapConfig: opts.bootstrapConfig, + resourceTypeGetter: opts.resourceTypeGetter, + eventHandler: opts.eventHandler, + closed: grpcsync.NewEvent(), + } + + l := grpclog.Component("xds") + logPrefix := opts.logPrefix + fmt.Sprintf("[xds-channel %p] ", xc) + xc.logger = igrpclog.NewPrefixLogger(l, logPrefix) + + if opts.backoff == nil { + opts.backoff = backoff.DefaultExponential.Backoff + } + xc.ads = ads.NewStreamImpl(ads.StreamOpts{ + Transport: xc.transport, + EventHandler: xc, + Backoff: opts.backoff, + NodeProto: xc.bootstrapConfig.Node(), + WatchExpiryTimeout: opts.watchExpiryTimeout, + LogPrefix: logPrefix, + }) + xc.lrs = lrs.NewStreamImpl(lrs.StreamOpts{ + Transport: xc.transport, + Backoff: opts.backoff, + NodeProto: xc.bootstrapConfig.Node(), + LogPrefix: logPrefix, + }) + return xc, nil +} + +// xdsChannel represents a client channel to a management server, and is +// responsible for managing the lifecycle of the ADS and LRS streams. It invokes +// callbacks on the registered event handler for various ADS stream events. +type xdsChannel struct { + // The following fields are initialized at creation time and are read-only + // after that, and hence need not be guarded by a mutex. + transport transport.Interface // Takes ownership of this transport (used to make streaming calls). + ads *ads.StreamImpl // An ADS stream to the management server. + lrs *lrs.StreamImpl // An LRS stream to the management server. + serverConfig *bootstrap.ServerConfig // Configuration of the server to connect to. + bootstrapConfig *bootstrap.Config // Complete bootstrap configuration, used to decode resources. + resourceTypeGetter func(string) xdsresource.Type // Function to retrieve resource parsing functionality, based on resource type. + eventHandler xdsChannelEventHandler // Callbacks for ADS stream events. + logger *igrpclog.PrefixLogger // Logger to use for logging. + closed *grpcsync.Event // Fired when the channel is closed. +} + +func (xc *xdsChannel) close() { + xc.closed.Fire() + xc.ads.Stop() + xc.lrs.Stop() + xc.transport.Close() + xc.logger.Infof("Shutdown") +} + +// reportLoad returns a load.Store that can be used to report load to the LRS, and a +// function that can be called to stop reporting load. +func (xc *xdsChannel) reportLoad() (*load.Store, func()) { + if xc.closed.HasFired() { + if xc.logger.V(2) { + xc.logger.Infof("Attempt to start load reporting on closed channel") + } + return nil, func() {} + } + return xc.lrs.ReportLoad() +} + +// subscribe adds a subscription for the given resource name of the given +// resource type on the ADS stream. 
+func (xc *xdsChannel) subscribe(typ xdsresource.Type, name string) { + if xc.closed.HasFired() { + if xc.logger.V(2) { + xc.logger.Infof("Attempt to subscribe to an xDS resource of type %s and name %q on a closed channel", typ.TypeName(), name) + } + return + } + xc.ads.Subscribe(typ, name) +} + +// unsubscribe removes the subscription for the given resource name of the given +// resource type from the ADS stream. +func (xc *xdsChannel) unsubscribe(typ xdsresource.Type, name string) { + if xc.closed.HasFired() { + if xc.logger.V(2) { + xc.logger.Infof("Attempt to unsubscribe to an xDS resource of type %s and name %q on a closed channel", typ.TypeName(), name) + } + return + } + xc.ads.Unsubscribe(typ, name) +} + +// The following OnADSXxx() methods implement the ads.StreamEventHandler interface +// and are invoked by the ADS stream implementation. + +// OnADSStreamError is invoked when an error occurs on the ADS stream. It +// propagates the update to the xDS client. +func (xc *xdsChannel) OnADSStreamError(err error) { + if xc.closed.HasFired() { + if xc.logger.V(2) { + xc.logger.Infof("Received ADS stream error on a closed xdsChannel: %v", err) + } + return + } + xc.eventHandler.adsStreamFailure(err) +} + +// OnADSWatchExpiry is invoked when a watch for a resource expires. It +// propagates the update to the xDS client. +func (xc *xdsChannel) OnADSWatchExpiry(typ xdsresource.Type, name string) { + if xc.closed.HasFired() { + if xc.logger.V(2) { + xc.logger.Infof("Received ADS resource watch expiry for resource %q on a closed xdsChannel", name) + } + return + } + xc.eventHandler.adsResourceDoesNotExist(typ, name) +} + +// OnADSResponse is invoked when a response is received on the ADS stream. It +// decodes the resources in the response, and propagates the updates to the xDS +// client. +// +// It returns the list of resource names in the response and any errors +// encountered during decoding. +func (xc *xdsChannel) OnADSResponse(resp ads.Response, onDone func()) ([]string, error) { + if xc.closed.HasFired() { + if xc.logger.V(2) { + xc.logger.Infof("Received an update from the ADS stream on closed ADS stream") + } + return nil, errors.New("xdsChannel is closed") + } + + // Lookup the resource parser based on the resource type. + rType := xc.resourceTypeGetter(resp.TypeURL) + if rType == nil { + return nil, xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource type URL %q unknown in response from server", resp.TypeURL) + } + + // Decode the resources and build the list of resource names to return. + opts := &xdsresource.DecodeOptions{ + BootstrapConfig: xc.bootstrapConfig, + ServerConfig: xc.serverConfig, + } + updates, md, err := decodeResponse(opts, rType, resp) + var names []string + for name := range updates { + names = append(names, name) + } + + xc.eventHandler.adsResourceUpdate(rType, updates, md, onDone) + return names, err +} + +// decodeResponse decodes the resources in the given ADS response. +// +// The opts parameter provides configuration options for decoding the resources. +// The rType parameter specifies the resource type parser to use for decoding +// the resources. +// +// The returned map contains a key for each resource in the response, with the +// value being either the decoded resource data or an error if decoding failed. +// The returned metadata includes the version of the response, the timestamp of +// the update, and the status of the update (ACKed or NACKed). 
+// +// If there are any errors decoding the resources, the metadata will indicate +// that the update was NACKed, and the returned error will contain information +// about all errors encountered by this function. +func decodeResponse(opts *xdsresource.DecodeOptions, rType xdsresource.Type, resp ads.Response) (map[string]ads.DataAndErrTuple, xdsresource.UpdateMetadata, error) { + timestamp := time.Now() + md := xdsresource.UpdateMetadata{ + Version: resp.Version, + Timestamp: timestamp, + } + + topLevelErrors := make([]error, 0) // Tracks deserialization errors, where we don't have a resource name. + perResourceErrors := make(map[string]error) // Tracks resource validation errors, where we have a resource name. + ret := make(map[string]ads.DataAndErrTuple) // Return result, a map from resource name to either resource data or error. + for _, r := range resp.Resources { + result, err := rType.Decode(opts, r) + + // Name field of the result is left unpopulated only when resource + // deserialization fails. + name := "" + if result != nil { + name = xdsresource.ParseName(result.Name).String() + } + if err == nil { + ret[name] = ads.DataAndErrTuple{Resource: result.Resource} + continue + } + if name == "" { + topLevelErrors = append(topLevelErrors, err) + continue + } + perResourceErrors[name] = err + // Add place holder in the map so we know this resource name was in + // the response. + ret[name] = ads.DataAndErrTuple{Err: err} + } + + if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { + md.Status = xdsresource.ServiceStatusACKed + return ret, md, nil + } + + md.Status = xdsresource.ServiceStatusNACKed + errRet := combineErrors(rType.TypeName(), topLevelErrors, perResourceErrors) + md.ErrState = &xdsresource.UpdateErrorMetadata{ + Version: resp.Version, + Err: errRet, + Timestamp: timestamp, + } + return ret, md, errRet +} diff --git a/xds/internal/xdsclient/channel_test.go b/xds/internal/xdsclient/channel_test.go new file mode 100644 index 000000000000..5d57d50a1847 --- /dev/null +++ b/xds/internal/xdsclient/channel_test.go @@ -0,0 +1,972 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsclient + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/envoyproxy/go-control-plane/pkg/wellknown" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" + "google.golang.org/grpc/internal/xds/bootstrap" + xdsinternal "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/httpfilter/router" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" + "google.golang.org/grpc/xds/internal/xdsclient/transport/grpctransport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/durationpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +// Lookup the listener resource type from the resource type map. This is used to +// parse listener resources used in this test. +var listenerType = xdsinternal.ResourceTypeMapForTesting[version.V3ListenerURL].(xdsresource.Type) + +// xdsChannelForTest creates an xdsChannel to the specified serverURI for +// testing purposes. +func xdsChannelForTest(t *testing.T, serverURI, nodeID string, watchExpiryTimeout time.Duration) *xdsChannel { + t.Helper() + + // Create server configuration for the above management server. + serverCfg, err := bootstrap.ServerConfigForTesting(bootstrap.ServerConfigTestingOptions{URI: serverURI}) + if err != nil { + t.Fatalf("Failed to create server config for testing: %v", err) + } + + // Create a grpc transport to the above management server. + tr, err := (&grpctransport.Builder{}).Build(transport.BuildOptions{ServerConfig: serverCfg}) + if err != nil { + t.Fatalf("Failed to create a transport for server config %s: %v", serverCfg, err) + } + + // Create bootstrap configuration with the top-level xds servers + // field containing the server configuration for the above + // management server. + contents, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{ + Servers: []byte(fmt.Sprintf(`[{ + "server_uri": %q, + "channel_creds": [{"type": "insecure"}] + }]`, serverURI)), + Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)), + }) + if err != nil { + t.Fatalf("Failed to create bootstrap contents: %v", err) + } + bootstrapCfg, err := bootstrap.NewConfigForTesting(contents) + if err != nil { + t.Fatalf("Failed to create bootstrap configuration: %v", err) + } + + // Create an xdsChannel that uses everything set up above. 
+ xc, err := newXDSChannel(xdsChannelOpts{ + transport: tr, + serverConfig: serverCfg, + bootstrapConfig: bootstrapCfg, + resourceTypeGetter: func(typeURL string) xdsresource.Type { + if typeURL != "type.googleapis.com/envoy.config.listener.v3.Listener" { + return nil + } + return listenerType + }, + eventHandler: newTestEventHandler(), + watchExpiryTimeout: watchExpiryTimeout, + }) + if err != nil { + t.Fatalf("Failed to create xdsChannel: %v", err) + } + t.Cleanup(func() { xc.close() }) + return xc +} + +// verifyUpdateAndMetadata verifies that the event handler received the expected +// updates and metadata. It checks that the received resource type matches the +// expected type, and that the received updates and metadata match the expected +// values. The function ignores the timestamp fields in the metadata, as those +// are expected to be different. +func verifyUpdateAndMetadata(ctx context.Context, t *testing.T, eh *testEventHandler, wantUpdates map[string]ads.DataAndErrTuple, wantMD xdsresource.UpdateMetadata) { + t.Helper() + + gotTyp, gotUpdates, gotMD, err := eh.waitForUpdate(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for update callback to be invoked on the event handler") + } + + if gotTyp != listenerType { + t.Fatalf("Got resource type %v, want %v", gotTyp, listenerType) + } + opts := cmp.Options{ + protocmp.Transform(), + cmpopts.EquateEmpty(), + cmpopts.EquateErrors(), + cmpopts.IgnoreFields(xdsresource.UpdateMetadata{}, "Timestamp"), + cmpopts.IgnoreFields(xdsresource.UpdateErrorMetadata{}, "Timestamp"), + } + if diff := cmp.Diff(wantUpdates, gotUpdates, opts); diff != "" { + t.Fatalf("Got unexpected diff in update (-want +got):\n%s\n want: %+v\n got: %+v", diff, wantUpdates, gotUpdates) + } + if diff := cmp.Diff(wantMD, gotMD, opts); diff != "" { + t.Fatalf("Got unexpected diff in update (-want +got):\n%s\n want: %v\n got: %v", diff, wantMD, gotMD) + } +} + +// Tests different failure cases when creating a new xdsChannel. It checks that +// the xdsChannel creation fails when any of the required options (transport, +// serverConfig, bootstrapConfig, or resourceTypeGetter) are missing or nil. 
+func (s) TestChannel_New_FailureCases(t *testing.T) { + type fakeTransport struct { + transport.Interface + } + + tests := []struct { + name string + opts xdsChannelOpts + wantErrStr string + }{ + { + name: "emptyTransport", + opts: xdsChannelOpts{}, + wantErrStr: "transport is nil", + }, + { + name: "emptyServerConfig", + opts: xdsChannelOpts{transport: &fakeTransport{}}, + wantErrStr: "serverConfig is nil", + }, + { + name: "emptyBootstrapConfig", + opts: xdsChannelOpts{ + transport: &fakeTransport{}, + serverConfig: &bootstrap.ServerConfig{}, + }, + wantErrStr: "bootstrapConfig is nil", + }, + { + name: "emptyResourceTypeGetter", + opts: xdsChannelOpts{ + transport: &fakeTransport{}, + serverConfig: &bootstrap.ServerConfig{}, + bootstrapConfig: &bootstrap.Config{}, + }, + wantErrStr: "resourceTypeGetter is nil", + }, + { + name: "emptyEventHandler", + opts: xdsChannelOpts{ + transport: &fakeTransport{}, + serverConfig: &bootstrap.ServerConfig{}, + bootstrapConfig: &bootstrap.Config{}, + resourceTypeGetter: func(string) xdsresource.Type { return nil }, + }, + wantErrStr: "eventHandler is nil", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if _, err := newXDSChannel(test.opts); err == nil || !strings.Contains(err.Error(), test.wantErrStr) { + t.Fatalf("newXDSChannel() = %v, want %q", err, test.wantErrStr) + } + }) + } +} + +// Tests different scenarios of the xdsChannel receiving a response from the +// management server. In all scenarios, the xdsChannel is expected to pass the +// received responses as-is to the resource parsing functionality specified by +// the resourceTypeGetter. +func (s) TestChannel_ADS_HandleResponseFromManagementServer(t *testing.T) { + const ( + listenerName1 = "listener-name-1" + listenerName2 = "listener-name-2" + routeName = "route-name" + clusterName = "cluster-name" + ) + var ( + badlyMarshaledResource = &anypb.Any{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Value: []byte{1, 2, 3, 4}, + } + apiListener = &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(t, &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}}}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + }), + } + listener1 = testutils.MarshalAny(t, &v3listenerpb.Listener{ + Name: listenerName1, + ApiListener: apiListener, + }) + listener2 = testutils.MarshalAny(t, &v3listenerpb.Listener{ + Name: listenerName2, + ApiListener: apiListener, + }) + ) + + tests := []struct { + desc string + resourceNamesToRequest []string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantUpdates map[string]ads.DataAndErrTuple + wantMD xdsresource.UpdateMetadata + wantErr error + }{ + { + desc: "one bad resource - deserialization failure", + resourceNamesToRequest: []string{listenerName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + VersionInfo: "0", + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: 
[]*anypb.Any{badlyMarshaledResource}, + }, + wantUpdates: nil, // No updates expected as the response runs into unmarshaling errors. + wantMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "0", + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "0", + Err: cmpopts.AnyError, + }, + }, + wantErr: cmpopts.AnyError, + }, + { + desc: "one bad resource - validation failure", + resourceNamesToRequest: []string{listenerName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + VersionInfo: "0", + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{testutils.MarshalAny(t, &v3listenerpb.Listener{ + Name: listenerName1, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(t, &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + }), + }, + })}, + }, + wantUpdates: map[string]ads.DataAndErrTuple{ + listenerName1: { + Err: cmpopts.AnyError, + }, + }, + wantMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "0", + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "0", + Err: cmpopts.AnyError, + }, + }, + }, + { + desc: "two bad resources", + resourceNamesToRequest: []string{listenerName1, listenerName2}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + VersionInfo: "0", + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{ + badlyMarshaledResource, + testutils.MarshalAny(t, &v3listenerpb.Listener{ + Name: listenerName2, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(t, &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + }), + }, + }), + }, + }, + wantUpdates: map[string]ads.DataAndErrTuple{ + listenerName2: { + Err: cmpopts.AnyError, + }, + }, + wantMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "0", + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "0", + Err: cmpopts.AnyError, + }, + }, + }, + { + desc: "one good resource", + resourceNamesToRequest: []string{listenerName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + VersionInfo: "0", + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{listener1}, + }, + wantUpdates: map[string]ads.DataAndErrTuple{ + listenerName1: { + Resource: &xdsresource.ListenerResourceData{Resource: xdsresource.ListenerUpdate{ + InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*xdsresource.Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]xdsresource.WeightedCluster{clusterName: {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}, + }, + }}}, + MaxStreamDuration: time.Second, + Raw: listener1, + HTTPFilters: makeRouterFilterList(t), + }}, + }, + }, + wantMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, + Version: "0", + }, + }, + { + desc: "one good and one bad - deserialization failure", + resourceNamesToRequest: []string{listenerName1, listenerName2}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + VersionInfo: "0", + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{ + badlyMarshaledResource, + listener2, + }, + }, + wantUpdates: map[string]ads.DataAndErrTuple{ + listenerName2: { + Resource: &xdsresource.ListenerResourceData{Resource: 
xdsresource.ListenerUpdate{ + InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*xdsresource.Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]xdsresource.WeightedCluster{clusterName: {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}, + }, + }}}, + MaxStreamDuration: time.Second, + Raw: listener2, + HTTPFilters: makeRouterFilterList(t), + }}, + }, + }, + wantMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "0", + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "0", + Err: cmpopts.AnyError, + }, + }, + }, + { + desc: "one good and one bad - validation failure", + resourceNamesToRequest: []string{listenerName1, listenerName2}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + VersionInfo: "0", + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{ + testutils.MarshalAny(t, &v3listenerpb.Listener{ + Name: listenerName1, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(t, &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + }), + }, + }), + listener2, + }, + }, + wantUpdates: map[string]ads.DataAndErrTuple{ + listenerName1: {Err: cmpopts.AnyError}, + listenerName2: { + Resource: &xdsresource.ListenerResourceData{Resource: xdsresource.ListenerUpdate{ + InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*xdsresource.Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]xdsresource.WeightedCluster{clusterName: {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}, + }, + }}}, + MaxStreamDuration: time.Second, + Raw: listener2, + HTTPFilters: makeRouterFilterList(t), + }}, + }, + }, + wantMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "0", + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "0", + Err: cmpopts.AnyError, + }, + }, + }, + { + desc: "two good resources", + resourceNamesToRequest: []string{listenerName1, listenerName2}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + VersionInfo: "0", + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{listener1, listener2}, + }, + wantUpdates: map[string]ads.DataAndErrTuple{ + listenerName1: { + Resource: &xdsresource.ListenerResourceData{Resource: xdsresource.ListenerUpdate{ + InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*xdsresource.Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]xdsresource.WeightedCluster{clusterName: {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}, + }, + }}}, + MaxStreamDuration: time.Second, + Raw: listener1, + HTTPFilters: makeRouterFilterList(t), + }}, + }, + listenerName2: { + Resource: &xdsresource.ListenerResourceData{Resource: xdsresource.ListenerUpdate{ + InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*xdsresource.Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]xdsresource.WeightedCluster{clusterName: {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}, + }, + }}}, + MaxStreamDuration: time.Second, + Raw: listener2, + HTTPFilters: makeRouterFilterList(t), + }}, + }, + }, + wantMD: xdsresource.UpdateMetadata{ + Status: 
xdsresource.ServiceStatusACKed, + Version: "0", + }, + }, + { + desc: "two resources when we requested one", + resourceNamesToRequest: []string{listenerName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + VersionInfo: "0", + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{listener1, listener2}, + }, + wantUpdates: map[string]ads.DataAndErrTuple{ + listenerName1: { + Resource: &xdsresource.ListenerResourceData{Resource: xdsresource.ListenerUpdate{ + InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*xdsresource.Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]xdsresource.WeightedCluster{clusterName: {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}, + }, + }}}, + MaxStreamDuration: time.Second, + Raw: listener1, + HTTPFilters: makeRouterFilterList(t), + }}, + }, + listenerName2: { + Resource: &xdsresource.ListenerResourceData{Resource: xdsresource.ListenerUpdate{ + InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*xdsresource.Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]xdsresource.WeightedCluster{clusterName: {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}, + }, + }}}, + MaxStreamDuration: time.Second, + Raw: listener2, + HTTPFilters: makeRouterFilterList(t), + }}, + }, + }, + wantMD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, + Version: "0", + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start a fake xDS management server and configure the response it + // would send to its client. + mgmtServer, cleanup, err := fakeserver.StartServer(nil) + if err != nil { + t.Fatalf("Failed to start fake xDS server: %v", err) + } + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Create an xdsChannel for the test with a long watch expiry timer + // to ensure that watches don't expire for the duration of the test. + nodeID := uuid.New().String() + xc := xdsChannelForTest(t, mgmtServer.Address, nodeID, 2*defaultTestTimeout) + defer xc.close() + + // Subscribe to the resources specified in the test table. + for _, name := range test.resourceNamesToRequest { + xc.subscribe(listenerType, name) + } + + // Wait for an update callback on the event handler and verify the + // contents of the update and the metadata. + verifyUpdateAndMetadata(ctx, t, xc.eventHandler.(*testEventHandler), test.wantUpdates, test.wantMD) + }) + } +} + +// Tests that the xdsChannel correctly handles the expiry of a watch for a +// resource by ensuring that the watch expiry callback is invoked on the event +// handler with the expected resource type and name. +func (s) TestChannel_ADS_HandleResponseWatchExpiry(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server, but do not configure any resources on it. + // This will result in the watch for a resource to timeout. 
+ mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{}) + + // Create an xdsChannel for the test with a short watch expiry timer to + // ensure that the test does not run very long, as it needs to wait for the + // watch to expire. + nodeID := uuid.New().String() + xc := xdsChannelForTest(t, mgmtServer.Address, nodeID, 2*defaultTestShortTimeout) + defer xc.close() + + // Subscribe to a listener resource. + const listenerName = "listener-name" + xc.subscribe(listenerType, listenerName) + + // Wait for the watch expiry callback on the authority to be invoked and + // verify that the watch expired for the expected resource name and type. + eventHandler := xc.eventHandler.(*testEventHandler) + gotTyp, gotName, err := eventHandler.waitForResourceDoesNotExist(ctx) + if err != nil { + t.Fatal("Timeout when waiting for the watch expiry callback to be invoked on the xDS client") + } + + if gotTyp != listenerType { + t.Fatalf("Got type %v, want %v", gotTyp, listenerType) + } + if gotName != listenerName { + t.Fatalf("Got name %v, want %v", gotName, listenerName) + } +} + +// Tests that the xdsChannel correctly handles stream failures by ensuring that +// the stream failure callback is invoked on the event handler. +func (s) TestChannel_ADS_StreamFailure(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server with a restartable listener to simulate + // connection failures. + l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("net.Listen() failed: %v", err) + } + lis := testutils.NewRestartableListener(l) + mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{Listener: lis}) + + // Configure a listener resource on the management server. + const listenerResourceName = "test-listener-resource" + const routeConfigurationName = "test-route-configuration-resource" + nodeID := uuid.New().String() + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(listenerResourceName, routeConfigurationName)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Create an xdsChannel for the test with a long watch expiry timer + // to ensure that watches don't expire for the duration of the test. + xc := xdsChannelForTest(t, mgmtServer.Address, nodeID, 2*defaultTestTimeout) + defer xc.close() + + // Subscribe to the resource created above. + xc.subscribe(listenerType, listenerResourceName) + + // Wait for an update callback on the event handler and verify the + // contents of the update and the metadata. 
+ hcm := testutils.MarshalAny(t, &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: routeConfigurationName, + }}, + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("router", &v3routerpb.Router{})}, + }) + listenerResource, err := anypb.New(&v3listenerpb.Listener{ + Name: listenerResourceName, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + }) + if err != nil { + t.Fatalf("Failed to create listener resource: %v", err) + } + + wantUpdates := map[string]ads.DataAndErrTuple{ + listenerResourceName: { + Resource: &xdsresource.ListenerResourceData{ + Resource: xdsresource.ListenerUpdate{ + RouteConfigName: routeConfigurationName, + HTTPFilters: makeRouterFilterList(t), + Raw: listenerResource, + }, + }, + }, + } + wantMD := xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusACKed, + Version: "1", + } + + eventHandler := xc.eventHandler.(*testEventHandler) + verifyUpdateAndMetadata(ctx, t, eventHandler, wantUpdates, wantMD) + + lis.Stop() + if err := eventHandler.waitForStreamFailure(ctx); err != nil { + t.Fatalf("Timeout when waiting for the stream failure callback to be invoked on the xDS client: %v", err) + } +} + +// Tests the behavior of the xdsChannel when a resource is unsubscribed. +// Verifies that when a previously subscribed resource is unsubscribed, a +// request is sent without the previously subscribed resource name. +func (s) TestChannel_ADS_ResourceUnsubscribe(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server that uses a channel to inform the test + // about the specific LDS resource names being requested. + ldsResourcesCh := make(chan []string, 1) + mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + t.Logf("Received request for resources: %v of type %s", req.GetResourceNames(), req.GetTypeUrl()) + + if req.TypeUrl != version.V3ListenerURL { + return fmt.Errorf("unexpected resource type URL: %q", req.TypeUrl) + } + + // Make the most recently requested names available to the test. + ldsResourcesCh <- req.GetResourceNames() + return nil + }, + }) + + // Configure two listener resources on the management server. 
+ const listenerResourceName1 = "test-listener-resource-1" + const routeConfigurationName1 = "test-route-configuration-resource-1" + const listenerResourceName2 = "test-listener-resource-2" + const routeConfigurationName2 = "test-route-configuration-resource-2" + nodeID := uuid.New().String() + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + e2e.DefaultClientListener(listenerResourceName1, routeConfigurationName1), + e2e.DefaultClientListener(listenerResourceName2, routeConfigurationName2), + }, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Create an xdsChannel for the test with a long watch expiry timer + // to ensure that watches don't expire for the duration of the test. + xc := xdsChannelForTest(t, mgmtServer.Address, nodeID, 2*defaultTestTimeout) + defer xc.close() + + // Subscribe to the resources created above and verify that a request is + // sent for the same. + xc.subscribe(listenerType, listenerResourceName1) + xc.subscribe(listenerType, listenerResourceName2) + if err := waitForResourceNames(ctx, t, ldsResourcesCh, []string{listenerResourceName1, listenerResourceName2}); err != nil { + t.Fatal(err) + } + + // Wait for the above resources to be ACKed. + if err := waitForResourceNames(ctx, t, ldsResourcesCh, []string{listenerResourceName1, listenerResourceName2}); err != nil { + t.Fatal(err) + } + + // Unsubscribe to one of the resources created above, and ensure that the + // other resource is still being requested. + xc.unsubscribe(listenerType, listenerResourceName1) + if err := waitForResourceNames(ctx, t, ldsResourcesCh, []string{listenerResourceName2}); err != nil { + t.Fatal(err) + } + + // Since the version on the management server for the above resource is not + // changed, we will not receive an update from it for the one resource that + // we are still requesting. + + // Unsubscribe to the remaining resource, and ensure that no more resources + // are being requested. + xc.unsubscribe(listenerType, listenerResourceName2) + if err := waitForResourceNames(ctx, t, ldsResourcesCh, []string{}); err != nil { + t.Fatal(err) + } +} + +// Tests the load reporting functionality of the xdsChannel. It creates an +// xdsChannel, starts load reporting, and verifies that an LRS streaming RPC is +// created. It then makes another call to the load reporting API and ensures +// that a new LRS stream is not created. Finally, it cancels the load reporting +// calls and ensures that the stream is closed when the last call is canceled. +// +// Note that this test does not actually report any load. That is already tested +// by an e2e style test in the xdsclient package. +func (s) TestChannel_LRS_ReportLoad(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Create a management server that serves LRS. + mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{SupportLoadReportingService: true}) + + // Create an xdsChannel for the test. Node id and watch expiry timer don't + // matter for LRS. + xc := xdsChannelForTest(t, mgmtServer.Address, "", defaultTestTimeout) + defer xc.close() + + // Start load reporting and verify that an LRS streaming RPC is created. 
+ _, stopLRS1 := xc.reportLoad() + lrsServer := mgmtServer.LRSServer + if _, err := lrsServer.LRSStreamOpenChan.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for an LRS streaming RPC to be created: %v", err) + } + + // Make another call to the load reporting API, and ensure that a new LRS + // stream is not created. + _, stopLRS2 := xc.reportLoad() + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := lrsServer.LRSStreamOpenChan.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("New LRS streaming RPC created when expected to use an existing one") + } + + // Cancel the first load reporting call, and ensure that the stream does not + // close (because we have another call open). + stopLRS1() + sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := lrsServer.LRSStreamCloseChan.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("LRS stream closed when expected to stay open") + } + + // Cancel the second load reporting call, and ensure the stream is closed. + stopLRS2() + if _, err := lrsServer.LRSStreamCloseChan.Receive(ctx); err != nil { + t.Fatal("Timeout waiting for LRS stream to close") + } +} + +// waitForResourceNames waits for the wantNames to be received on namesCh. +// Returns a non-nil error if the context expires before that. +func waitForResourceNames(ctx context.Context, t *testing.T, namesCh chan []string, wantNames []string) error { + t.Helper() + + var lastRequestedNames []string + for ; ; <-time.After(defaultTestShortTimeout) { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for resources %v to be requested from the management server. Last requested resources: %v", wantNames, lastRequestedNames) + case gotNames := <-namesCh: + if cmp.Equal(gotNames, wantNames, cmpopts.EquateEmpty(), cmpopts.SortSlices(func(s1, s2 string) bool { return s1 < s2 })) { + return nil + } + lastRequestedNames = gotNames + } + } +} + +// newTestEventHandler creates a new testEventHandler instance with the +// necessary channels for testing the xdsChannel. +func newTestEventHandler() *testEventHandler { + return &testEventHandler{ + typeCh: make(chan xdsresource.Type, 1), + updateCh: make(chan map[string]ads.DataAndErrTuple, 1), + mdCh: make(chan xdsresource.UpdateMetadata, 1), + nameCh: make(chan string, 1), + connErrCh: make(chan error, 1), + } +} + +// testEventHandler is a struct that implements the xdsChannelEventhandler +// interface. It is used to receive events from an xdsChannel, and has multiple +// channels on which it makes these events available to the test. +type testEventHandler struct { + typeCh chan xdsresource.Type // Resource type of an update or resource-does-not-exist error. + updateCh chan map[string]ads.DataAndErrTuple // Resource updates. + mdCh chan xdsresource.UpdateMetadata // Metadata from an update. + nameCh chan string // Name of the non-existent resource. + connErrCh chan error // Connectivity error. 
+ +} + +func (ta *testEventHandler) adsStreamFailure(err error) { + ta.connErrCh <- err +} + +func (ta *testEventHandler) waitForStreamFailure(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ta.connErrCh: + } + return nil +} + +func (ta *testEventHandler) adsResourceUpdate(typ xdsresource.Type, updates map[string]ads.DataAndErrTuple, md xdsresource.UpdateMetadata, onDone func()) { + ta.typeCh <- typ + ta.updateCh <- updates + ta.mdCh <- md + onDone() +} + +// waitForUpdate waits for the next resource update event from the xdsChannel. +// It returns the resource type, the resource updates, and the update metadata. +// If the context is canceled, it returns an error. +func (ta *testEventHandler) waitForUpdate(ctx context.Context) (xdsresource.Type, map[string]ads.DataAndErrTuple, xdsresource.UpdateMetadata, error) { + var typ xdsresource.Type + var updates map[string]ads.DataAndErrTuple + var md xdsresource.UpdateMetadata + + select { + case typ = <-ta.typeCh: + case <-ctx.Done(): + return nil, nil, xdsresource.UpdateMetadata{}, ctx.Err() + } + + select { + case updates = <-ta.updateCh: + case <-ctx.Done(): + return nil, nil, xdsresource.UpdateMetadata{}, ctx.Err() + } + + select { + case md = <-ta.mdCh: + case <-ctx.Done(): + return nil, nil, xdsresource.UpdateMetadata{}, ctx.Err() + } + return typ, updates, md, nil +} + +func (ta *testEventHandler) adsResourceDoesNotExist(typ xdsresource.Type, name string) { + ta.typeCh <- typ + ta.nameCh <- name +} + +// waitForResourceDoesNotExist waits for the next resource-does-not-exist event +// from the xdsChannel. It returns the resource type and the resource name. If +// the context is canceled, it returns an error. +func (ta *testEventHandler) waitForResourceDoesNotExist(ctx context.Context) (xdsresource.Type, string, error) { + var typ xdsresource.Type + var name string + + select { + case typ = <-ta.typeCh: + case <-ctx.Done(): + return nil, "", ctx.Err() + } + + select { + case name = <-ta.nameCh: + case <-ctx.Done(): + return nil, "", ctx.Err() + } + return typ, name, nil +} + +func newStringP(s string) *string { + return &s +} + +func makeRouterFilter(t *testing.T) xdsresource.HTTPFilter { + routerBuilder := httpfilter.Get(router.TypeURL) + routerConfig, _ := routerBuilder.ParseFilterConfig(testutils.MarshalAny(t, &v3routerpb.Router{})) + return xdsresource.HTTPFilter{Name: "router", Filter: routerBuilder, Config: routerConfig} +} + +func makeRouterFilterList(t *testing.T) []xdsresource.HTTPFilter { + return []xdsresource.HTTPFilter{makeRouterFilter(t)} +} diff --git a/xds/internal/xdsclient/transport/ads/ads_stream.go b/xds/internal/xdsclient/transport/ads/ads_stream.go index a3a17144d711..457bb3a171a2 100644 --- a/xds/internal/xdsclient/transport/ads/ads_stream.go +++ b/xds/internal/xdsclient/transport/ads/ads_stream.go @@ -54,6 +54,13 @@ type Response struct { Resources []*anypb.Any } +// DataAndErrTuple is a struct that holds a resource and an error. It is used to +// return a resource and any associated error from a function. +type DataAndErrTuple struct { + Resource xdsresource.ResourceData + Err error +} + // StreamEventHandler is an interface that defines the callbacks for events that // occur on the ADS stream. Methods on this interface may be invoked // concurrently and implementations need to handle them in a thread-safe manner. 
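For readers unfamiliar with the pattern used by testEventHandler above, the following is a minimal, self-contained sketch (an editor's illustration, not part of this patch series) of the same technique: callbacks from the code under test are relayed onto size-1 buffered channels, and the test waits on those channels with a context deadline. The names event, relayHandler, and waitForEvent are hypothetical and do not appear in grpc-go.

package main

import (
	"context"
	"fmt"
	"time"
)

type event struct{ name string }

// relayHandler forwards every callback invocation onto a buffered channel so
// that a test can consume events at its own pace.
type relayHandler struct {
	eventCh chan event
}

func newRelayHandler() *relayHandler {
	return &relayHandler{eventCh: make(chan event, 1)}
}

// onEvent is the callback invoked by the code under test.
func (h *relayHandler) onEvent(e event) {
	h.eventCh <- e
}

// waitForEvent blocks until an event is received or the context expires,
// mirroring helpers like waitForUpdate and waitForStreamFailure above.
func (h *relayHandler) waitForEvent(ctx context.Context) (event, error) {
	select {
	case e := <-h.eventCh:
		return e, nil
	case <-ctx.Done():
		return event{}, ctx.Err()
	}
}

func main() {
	h := newRelayHandler()
	go h.onEvent(event{name: "listener-update"}) // Simulates the code under test firing a callback.

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	e, err := h.waitForEvent(ctx)
	if err != nil {
		fmt.Println("timed out waiting for event:", err)
		return
	}
	fmt.Println("received event:", e.name)
}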
diff --git a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index 18d47cbc101d..8e9375fcbbec 100644 --- a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -86,8 +86,8 @@ type ClusterResourceData struct { Resource ClusterUpdate } -// Equal returns true if other is equal to r. -func (c *ClusterResourceData) Equal(other ResourceData) bool { +// RawEqual returns true if other is equal to r. +func (c *ClusterResourceData) RawEqual(other ResourceData) bool { if c == nil && other == nil { return true } diff --git a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index 66c0ae0b2022..94c03d0c5228 100644 --- a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -81,8 +81,8 @@ type EndpointsResourceData struct { Resource EndpointsUpdate } -// Equal returns true if other is equal to r. -func (e *EndpointsResourceData) Equal(other ResourceData) bool { +// RawEqual returns true if other is equal to r. +func (e *EndpointsResourceData) RawEqual(other ResourceData) bool { if e == nil && other == nil { return true } diff --git a/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 80fa5e6a21ec..e3ca1134a07b 100644 --- a/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -118,8 +118,8 @@ type ListenerResourceData struct { Resource ListenerUpdate } -// Equal returns true if other is equal to l. -func (l *ListenerResourceData) Equal(other ResourceData) bool { +// RawEqual returns true if other is equal to l. +func (l *ListenerResourceData) RawEqual(other ResourceData) bool { if l == nil && other == nil { return true } diff --git a/xds/internal/xdsclient/xdsresource/resource_type.go b/xds/internal/xdsclient/xdsresource/resource_type.go index 55cfd6fbb15b..0e398cad9126 100644 --- a/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/xds/internal/xdsclient/xdsresource/resource_type.go @@ -121,9 +121,9 @@ type Type interface { type ResourceData interface { isResourceData() - // Equal returns true if the passed in resource data is equal to that of the - // receiver. - Equal(ResourceData) bool + // RawEqual returns true if the passed in resource data is equal to that of + // the receiver, based on the underlying raw protobuf message. + RawEqual(ResourceData) bool // ToJSON returns a JSON string representation of the resource data. ToJSON() string diff --git a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index ed32abb8333d..98ac313288a2 100644 --- a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -81,8 +81,8 @@ type RouteConfigResourceData struct { Resource RouteConfigUpdate } -// Equal returns true if other is equal to r. -func (r *RouteConfigResourceData) Equal(other ResourceData) bool { +// RawEqual returns true if other is equal to r. 
+func (r *RouteConfigResourceData) RawEqual(other ResourceData) bool { if r == nil && other == nil { return true } From c4c8b113050ecd856b118b5a58bda24e17763d59 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 24 Oct 2024 10:02:09 -0700 Subject: [PATCH 23/57] xds/resolver: add a way to specify the xDS client to use for testing purposes (#7771) --- internal/internal.go | 14 ++++++++++++++ xds/internal/resolver/xds_resolver.go | 24 +++++++++++++++++++----- 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/internal/internal.go b/internal/internal.go index 20b4dc3d3536..88900fa9bbc6 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -149,6 +149,20 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + // NewXDSResolverWithClientForTesting creates a new xDS resolver builder + // using the provided xDS client instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index b5d24e4bf214..de339a7c9b69 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -44,10 +44,10 @@ import ( // xdsresolver.Scheme const Scheme = "xds" -// newBuilderForTesting creates a new xds resolver builder using a specific xds -// bootstrap config, so tests can use multiple xds clients in different -// ClientConns at the same time. -func newBuilderForTesting(config []byte) (resolver.Builder, error) { +// newBuilderWithConfigForTesting creates a new xds resolver builder using a +// specific xds bootstrap config, so tests can use multiple xds clients in +// different ClientConns at the same time. +func newBuilderWithConfigForTesting(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ newXDSClient: func(name string) (xdsclient.XDSClient, func(), error) { return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Name: name, Contents: config}) @@ -55,9 +55,23 @@ func newBuilderForTesting(config []byte) (resolver.Builder, error) { }, nil } +// newBuilderWithClientForTesting creates a new xds resolver builder using the +// specific xDS client, so that tests have complete control over the exact +// specific xDS client being used. +func newBuilderWithClientForTesting(client xdsclient.XDSClient) (resolver.Builder, error) { + return &xdsResolverBuilder{ + newXDSClient: func(string) (xdsclient.XDSClient, func(), error) { + // Returning an empty close func here means that the responsibility + // of closing the client lies with the caller. 
+ return client, func() {}, nil + }, + }, nil +} + func init() { resolver.Register(&xdsResolverBuilder{}) - internal.NewXDSResolverWithConfigForTesting = newBuilderForTesting + internal.NewXDSResolverWithConfigForTesting = newBuilderWithConfigForTesting + internal.NewXDSResolverWithClientForTesting = newBuilderWithClientForTesting rinternal.NewWRR = wrr.NewRandom rinternal.NewXDSClient = xdsclient.New From f8e5d8f704180341e3f1034bc503a324d05d91dc Mon Sep 17 00:00:00 2001 From: Paul Chesnais Date: Thu, 24 Oct 2024 13:14:00 -0400 Subject: [PATCH 24/57] mem: use slice capacity instead of length, to determine whether to pool buffers or directly allocate them (#7702) * Address #7631 by correctly pooling large-capacity buffers As the issue states, `mem.NewBuffer` would not pool buffers with a length below the pooling threshold but whose capacity is actually larger than the pooling threshold. This can lead to buffers being leaked. --------- Co-authored-by: Purnesh Dixit Co-authored-by: Easwar Swaminathan --- mem/buffers.go | 6 +++++- mem/buffers_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/mem/buffers.go b/mem/buffers.go index 565ec0a13949..ecbf0b9a73ea 100644 --- a/mem/buffers.go +++ b/mem/buffers.go @@ -92,7 +92,11 @@ func newBuffer() *buffer { // // Note that the backing array of the given data is not copied. func NewBuffer(data *[]byte, pool BufferPool) Buffer { - if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + // Use the buffer's capacity instead of the length, otherwise buffers may + // not be reused under certain conditions. For example, if a large buffer + // is acquired from the pool, but fewer bytes than the buffering threshold + // are written to it, the buffer will not be returned to the pool. + if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) { return (SliceBuffer)(*data) } b := newBuffer() diff --git a/mem/buffers_test.go b/mem/buffers_test.go index c17995745209..2b0627da159d 100644 --- a/mem/buffers_test.go +++ b/mem/buffers_test.go @@ -98,6 +98,33 @@ func (s) TestBuffer_NewBufferRefAndFree(t *testing.T) { } } +func (s) TestBuffer_NewBufferHandlesShortBuffers(t *testing.T) { + const threshold = 100 + + // Update the pooling threshold, since that's what's being tested. + internal.SetBufferPoolingThresholdForTesting.(func(int))(threshold) + t.Cleanup(func() { + internal.SetBufferPoolingThresholdForTesting.(func(int))(0) + }) + + // Make a pool with a buffer whose capacity is larger than the pooling + // threshold, but whose length is less than the threshold. + b := make([]byte, threshold/2, threshold*2) + pool := &singleBufferPool{ + t: t, + data: &b, + } + + // Get a Buffer, then free it. If NewBuffer decided that the Buffer + // shouldn't get pooled, Free will be a noop and singleBufferPool will not + // have been updated. 
+ mem.NewBuffer(&b, pool).Free() + + if pool.data != nil { + t.Fatalf("Buffer not returned to pool") + } +} + func (s) TestBuffer_FreeAfterFree(t *testing.T) { buf := newBuffer([]byte("abcd"), mem.NopBufferPool{}) if buf.Len() != 4 { From e0a730c111be689ed220f87f14c0ef7a0bc419a3 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 25 Oct 2024 09:00:38 -0700 Subject: [PATCH 25/57] clusterresolver: fix a comment in a test (#7776) --- .../e2e_test/aggregate_cluster_test.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go b/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go index b82e6d09202a..89f3d1bcac71 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go @@ -112,7 +112,9 @@ func (s) TestAggregateCluster_WithTwoEDSClusters(t *testing.T) { return nil } if len(req.GetResourceNames()) == 0 { - // This is the case for ACKs. Do nothing here. + // This happens at the end of the test when the grpc channel is + // being shut down and it is no longer interested in xDS + // resources. return nil } select { @@ -489,11 +491,15 @@ func (s) TestAggregateCluster_WithEDSAndDNS(t *testing.T) { if req.GetTypeUrl() != version.V3EndpointsURL { return nil } - if len(req.GetResourceNames()) > 0 { - select { - case edsResourceCh <- req.GetResourceNames()[0]: - default: - } + if len(req.GetResourceNames()) == 0 { + // This happens at the end of the test when the grpc channel is + // being shut down and it is no longer interested in xDS + // resources. + return nil + } + select { + case edsResourceCh <- req.GetResourceNames()[0]: + default: } return nil }, From a82315c00fa4311dcd0df84522d97e68014f1314 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 25 Oct 2024 11:33:47 -0700 Subject: [PATCH 26/57] testutils: change ListenerWrapper to push the most recently accepted connection (#7772) --- internal/testutils/wrappers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/testutils/wrappers.go b/internal/testutils/wrappers.go index c9b596d8851c..1770b5237c4d 100644 --- a/internal/testutils/wrappers.go +++ b/internal/testutils/wrappers.go @@ -53,7 +53,7 @@ func (l *ListenerWrapper) Accept() (net.Conn, error) { } closeCh := NewChannel() conn := &ConnWrapper{Conn: c, CloseCh: closeCh} - l.NewConnCh.Send(conn) + l.NewConnCh.Replace(conn) return conn, nil } From 94e1b29a1ca2e146087af7cd6c1e04a1aea56a58 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 25 Oct 2024 13:42:20 -0700 Subject: [PATCH 27/57] vet: add dependency checks (#7766) --- .github/workflows/deps.yml | 43 ++++++++++++++++++++++++++++++++++++++ scripts/gen-deps.sh | 21 +++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100644 .github/workflows/deps.yml create mode 100755 scripts/gen-deps.sh diff --git a/.github/workflows/deps.yml b/.github/workflows/deps.yml new file mode 100644 index 000000000000..c510e0e35fbb --- /dev/null +++ b/.github/workflows/deps.yml @@ -0,0 +1,43 @@ +name: Dependency Changes + +# Trigger on PRs. +on: + pull_request: + +permissions: + contents: read + +jobs: + # Compare dependencies before and after this PR. 
+ dependencies: + runs-on: ubuntu-latest + timeout-minutes: 10 + strategy: + fail-fast: true + + steps: + - name: Checkout repo + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: stable + cache-dependency-path: "**/*go.sum" + + # Run the commands to generate dependencies before and after and compare. + - name: Compare dependencies + run: | + BEFORE="$(mktemp -d)" + AFTER="$(mktemp -d)" + + scripts/gen-deps.sh "${AFTER}" + git checkout origin/master + scripts/gen-deps.sh "${BEFORE}" + + echo "Comparing dependencies..." + # Run grep in a sub-shell since bash does not support ! in the middle of a pipe + diff -u0 -r "${BEFORE}" "${AFTER}" | bash -c '! grep -v "@@"' + echo "No changes detected." diff --git a/scripts/gen-deps.sh b/scripts/gen-deps.sh new file mode 100755 index 000000000000..bc647c4d8cba --- /dev/null +++ b/scripts/gen-deps.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -e # Exit on error +set -o pipefail # Fail a pipe if any sub-command fails. + +if [[ "$#" -ne 1 || ! -d "$1" ]]; then + echo "Specify a valid output directory as the first parameter." + exit 1 +fi + +SCRIPTS_DIR="$(dirname "$0")" +OUTPUT_DIR="$1" + +cd "${SCRIPTS_DIR}/.." + +git ls-files -- '*.go' | grep -v '\(^\|/\)\(internal\|examples\|benchmark\|interop\|test\|testdata\)\(/\|$\)' | xargs dirname | sort -u | while read d; do + pushd "$d" > /dev/null + pkg="$(echo "$d" | sed 's;\.;grpc;' | sed 's;/;_;g')" + go list -deps . | sort >| "${OUTPUT_DIR}/$pkg" + popd > /dev/null +done From 67b9ebf4fc7264a16625a5cf9e25912855600520 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 25 Oct 2024 15:24:34 -0700 Subject: [PATCH 28/57] xdsclient: make sending requests more deterministic (#7774) --- .../xdsclient/transport/ads/ads_stream.go | 63 ++++++++++++------- 1 file changed, 39 insertions(+), 24 deletions(-) diff --git a/xds/internal/xdsclient/transport/ads/ads_stream.go b/xds/internal/xdsclient/transport/ads/ads_stream.go index 457bb3a171a2..e5f6cefe6159 100644 --- a/xds/internal/xdsclient/transport/ads/ads_stream.go +++ b/xds/internal/xdsclient/transport/ads/ads_stream.go @@ -102,6 +102,7 @@ type resourceTypeState struct { nonce string // Last received nonce. Should be reset when the stream breaks. bufferedRequests chan struct{} // Channel to buffer requests when writing is blocked. subscribedResources map[string]*ResourceWatchState // Map of subscribed resource names to their state. + pendingWrite bool // True if there is a pending write for this resource type. } // StreamImpl provides the functionality associated with an ADS (Aggregated @@ -203,6 +204,7 @@ func (s *StreamImpl) Subscribe(typ xdsresource.Type, name string) { // Create state for the newly subscribed resource. The watch timer will // be started when a request for this resource is actually sent out. state.subscribedResources[name] = &ResourceWatchState{State: ResourceWatchStateStarted} + state.pendingWrite = true // Send a request for the resource type with updated subscriptions. s.requestCh.Put(typ) @@ -233,6 +235,7 @@ func (s *StreamImpl) Unsubscribe(typ xdsresource.Type, name string) { rs.ExpiryTimer.Stop() } delete(state.subscribedResources, name) + state.pendingWrite = true // Send a request for the resource type with updated subscriptions. 
s.requestCh.Put(typ) @@ -346,17 +349,7 @@ func (s *StreamImpl) sendNew(stream transport.StreamingCall, typ xdsresource.Typ return nil } - names := resourceNames(state.subscribedResources) - if err := s.sendMessageLocked(stream, names, typ.TypeURL(), state.version, state.nonce, nil); err != nil { - return err - - } - select { - case <-state.bufferedRequests: - default: - } - s.startWatchTimersLocked(typ, names) - return nil + return s.sendMessageIfWritePendingLocked(stream, typ, state) } // sendExisting sends out discovery requests for existing resources when @@ -385,18 +378,10 @@ func (s *StreamImpl) sendExisting(stream transport.StreamingCall) error { continue } - names := resourceNames(state.subscribedResources) - if s.logger.V(2) { - s.logger.Infof("Re-requesting resources %v of type %q, as the stream has been recreated", names, typ.TypeURL()) - } - if err := s.sendMessageLocked(stream, names, typ.TypeURL(), state.version, state.nonce, nil); err != nil { + state.pendingWrite = true + if err := s.sendMessageIfWritePendingLocked(stream, typ, state); err != nil { return err } - select { - case <-state.bufferedRequests: - default: - } - s.startWatchTimersLocked(typ, names) } return nil } @@ -413,11 +398,9 @@ func (s *StreamImpl) sendBuffered(stream transport.StreamingCall) error { for typ, state := range s.resourceTypeState { select { case <-state.bufferedRequests: - names := resourceNames(state.subscribedResources) - if err := s.sendMessageLocked(stream, names, typ.TypeURL(), state.version, state.nonce, nil); err != nil { + if err := s.sendMessageIfWritePendingLocked(stream, typ, state); err != nil { return err } - s.startWatchTimersLocked(typ, names) default: // No buffered request. continue @@ -426,6 +409,38 @@ func (s *StreamImpl) sendBuffered(stream transport.StreamingCall) error { return nil } +// sendMessageIfWritePendingLocked attempts to sends a discovery request to the +// server, if there is a pending write for the given resource type. +// +// If the request is successfully sent, the pending write field is cleared and +// watch timers are started for the resources in the request. +// +// Caller needs to hold c.mu. +func (s *StreamImpl) sendMessageIfWritePendingLocked(stream transport.StreamingCall, typ xdsresource.Type, state *resourceTypeState) error { + if !state.pendingWrite { + if s.logger.V(2) { + s.logger.Infof("Skipping sending request for type %q, because all subscribed resources were already sent", typ.TypeURL()) + } + return nil + } + + names := resourceNames(state.subscribedResources) + if err := s.sendMessageLocked(stream, names, typ.TypeURL(), state.version, state.nonce, nil); err != nil { + return err + } + state.pendingWrite = false + + // Drain the buffered requests channel because we just sent a request for this + // resource type. + select { + case <-state.bufferedRequests: + default: + } + + s.startWatchTimersLocked(typ, names) + return nil +} + // sendMessageLocked sends a discovery request to the server, populating the // different fields of the message with the given parameters. Returns a non-nil // error if the request could not be sent. 
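To make the coalescing behavior of the pendingWrite flag introduced in this patch easier to follow, here is a minimal, self-contained sketch (an editor's illustration, not part of the patch): subscription changes only mark a resource type dirty, and the sender writes at most one request per dirty type, clearing the flag on a successful write. The coalescingSender and typeState names are hypothetical.

package main

import (
	"fmt"
	"sync"
)

type typeState struct {
	subscribed   map[string]bool
	pendingWrite bool
}

type coalescingSender struct {
	mu    sync.Mutex
	types map[string]*typeState
}

func newCoalescingSender() *coalescingSender {
	return &coalescingSender{types: make(map[string]*typeState)}
}

// subscribe records interest in a resource and marks its type as needing a write.
func (c *coalescingSender) subscribe(typeURL, name string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	st, ok := c.types[typeURL]
	if !ok {
		st = &typeState{subscribed: make(map[string]bool)}
		c.types[typeURL] = st
	}
	st.subscribed[name] = true
	st.pendingWrite = true
}

// flush sends at most one request per type, and only for types with a pending
// write, analogous to sendMessageIfWritePendingLocked above.
func (c *coalescingSender) flush(send func(typeURL string, names []string)) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for typeURL, st := range c.types {
		if !st.pendingWrite {
			continue // Nothing changed for this type since the last write.
		}
		names := make([]string, 0, len(st.subscribed))
		for n := range st.subscribed {
			names = append(names, n)
		}
		send(typeURL, names)
		st.pendingWrite = false
	}
}

func main() {
	s := newCoalescingSender()
	s.subscribe("type.googleapis.com/envoy.config.listener.v3.Listener", "listener-1")
	s.subscribe("type.googleapis.com/envoy.config.listener.v3.Listener", "listener-2")

	// Both subscriptions above are coalesced into a single write.
	s.flush(func(typeURL string, names []string) {
		fmt.Printf("sending request for %s: %v\n", typeURL, names)
	})
	// A second flush with no intervening changes writes nothing.
	s.flush(func(typeURL string, names []string) {
		fmt.Println("unexpected duplicate write")
	})
}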
From a0cbb520be35406ba254adce037623084e031cde Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Sat, 26 Oct 2024 04:30:14 +0530 Subject: [PATCH 29/57] github: add Go 1.23 testing and make staticcheck work locally with go1.23 (#7751) --- .github/workflows/testing.yml | 18 +++++++++--------- scripts/vet.sh | 8 +++++++- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index a6576a21fa15..c5b7dd461773 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -27,7 +27,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: '1.22' + go-version: '1.23' cache-dependency-path: "**/go.sum" # Run the vet-proto checks. @@ -46,32 +46,32 @@ jobs: goversion: '1.22' - type: extras - goversion: '1.22' + goversion: '1.23' - type: tests - goversion: '1.22' + goversion: '1.23' - type: tests - goversion: '1.22' + goversion: '1.23' testflags: -race - type: tests - goversion: '1.22' + goversion: '1.23' testflags: '-race -tags=buffer_pooling' - type: tests - goversion: '1.22' + goversion: '1.23' goarch: 386 - type: tests - goversion: '1.22' + goversion: '1.23' goarch: arm64 - type: tests - goversion: '1.21' + goversion: '1.22' - type: tests - goversion: '1.22' + goversion: '1.23' testflags: -race grpcenv: 'GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST=true' diff --git a/scripts/vet.sh b/scripts/vet.sh index 0d2b8b8f42ba..8db7e19c5364 100755 --- a/scripts/vet.sh +++ b/scripts/vet.sh @@ -106,7 +106,13 @@ for MOD_FILE in $(find . -name 'go.mod'); do # - Collection of static analysis checks SC_OUT="$(mktemp)" - staticcheck -go 1.22 -checks 'all' ./... >"${SC_OUT}" || true + # By default, Staticcheck targets the Go version declared in go.mod via the go + # directive. For Go 1.21 and newer, that directive specifies the minimum + # required version of Go. + # If a version is provided to Staticcheck using the -go flag, and the go + # toolchain version is higher than the one in go.mod, Staticcheck will report + # errors for usages of new language features in the std lib code. + staticcheck -checks 'all' ./... >"${SC_OUT}" || true # Error for anything other than checks that need exclusions. noret_grep -v "(ST1000)" "${SC_OUT}" | noret_grep -v "(SA1019)" | noret_grep -v "(ST1003)" | noret_grep -v "(ST1019)\|\(other import of\)" | not grep -v "(SA4000)" From cb329375b14e0ddd4e454e18405a732f7c2ccd72 Mon Sep 17 00:00:00 2001 From: Luwei Ge Date: Fri, 25 Oct 2024 16:28:17 -0700 Subject: [PATCH 30/57] credentials: Support ALTSPerRPCCreds in DefaultCredentialsOptions (#7775) * Replace the gRFC pull request with the permanent link. * add ALTSPerRPCCreds in DefaultCredentialsOptions to support channel aware RPC creds * go vet * address comment --- credentials/google/google.go | 33 +++++++++++ credentials/google/google_test.go | 98 +++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+) diff --git a/credentials/google/google.go b/credentials/google/google.go index fbdf7dc2997a..2f1ac6740654 100644 --- a/credentials/google/google.go +++ b/credentials/google/google.go @@ -39,6 +39,9 @@ var logger = grpclog.Component("credentials") type DefaultCredentialsOptions struct { // PerRPCCreds is a per RPC credentials that is passed to a bundle. PerRPCCreds credentials.PerRPCCredentials + // ALTSPerRPCCreds is a per RPC credentials that, if specified, will + // supercede PerRPCCreds above for and only for ALTS connections. 
+ ALTSPerRPCCreds credentials.PerRPCCredentials } // NewDefaultCredentialsWithOptions returns a credentials bundle that is @@ -55,6 +58,12 @@ func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credential logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) } } + if opts.ALTSPerRPCCreds != nil { + opts.PerRPCCreds = &dualPerRPCCreds{ + perRPCCreds: opts.PerRPCCreds, + altsPerRPCCreds: opts.ALTSPerRPCCreds, + } + } c := &creds{opts: opts} bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { @@ -143,3 +152,27 @@ func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { return newCreds, nil } + +// dualPerRPCCreds implements credentials.PerRPCCredentials by embedding the +// fallback PerRPCCredentials and the ALTS one. It pickes one of them based on +// the channel type. +type dualPerRPCCreds struct { + perRPCCreds credentials.PerRPCCredentials + altsPerRPCCreds credentials.PerRPCCredentials +} + +func (d *dualPerRPCCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + ri, ok := credentials.RequestInfoFromContext(ctx) + if !ok { + return nil, fmt.Errorf("request info not found from context") + } + if authType := ri.AuthInfo.AuthType(); authType == "alts" { + return d.altsPerRPCCreds.GetRequestMetadata(ctx, uri...) + } + // This ensures backward compatibility even if authType is not "tls". + return d.perRPCCreds.GetRequestMetadata(ctx, uri...) +} + +func (d *dualPerRPCCreds) RequireTransportSecurity() bool { + return d.altsPerRPCCreds.RequireTransportSecurity() || d.perRPCCreds.RequireTransportSecurity() +} diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go index f9353df80f5b..7b2910a9e0a9 100644 --- a/credentials/google/google_test.go +++ b/credentials/google/google_test.go @@ -23,6 +23,7 @@ import ( "net" "testing" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc/credentials" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpctest" @@ -59,6 +60,18 @@ func (t *testAuthInfo) AuthType() string { return t.typ } +type testPerRPCCreds struct { + md map[string]string +} + +func (c *testPerRPCCreds) RequireTransportSecurity() bool { + return true +} + +func (c *testPerRPCCreds) GetRequestMetadata(context.Context, ...string) (map[string]string, error) { + return c.md, nil +} + var ( testTLS = &testCreds{typ: "tls"} testALTS = &testCreds{typ: "alts"} @@ -161,3 +174,88 @@ func (s) TestClientHandshakeBasedOnClusterName(t *testing.T) { } } } + +func TestDefaultCredentialsWithOptions(t *testing.T) { + md1 := map[string]string{"foo": "tls"} + md2 := map[string]string{"foo": "alts"} + tests := []struct { + desc string + defaultCredsOpts DefaultCredentialsOptions + authInfo credentials.AuthInfo + wantedMetadata map[string]string + }{ + { + desc: "no ALTSPerRPCCreds with tls channel", + defaultCredsOpts: DefaultCredentialsOptions{ + PerRPCCreds: &testPerRPCCreds{ + md: md1, + }, + }, + authInfo: &testAuthInfo{typ: "tls"}, + wantedMetadata: md1, + }, + { + desc: "no ALTSPerRPCCreds with alts channel", + defaultCredsOpts: DefaultCredentialsOptions{ + PerRPCCreds: &testPerRPCCreds{ + md: md1, + }, + }, + authInfo: &testAuthInfo{typ: "alts"}, + wantedMetadata: md1, + }, + { + desc: "ALTSPerRPCCreds specified with tls channel", + defaultCredsOpts: DefaultCredentialsOptions{ + PerRPCCreds: &testPerRPCCreds{ + md: md1, + }, + ALTSPerRPCCreds: &testPerRPCCreds{ + md: md2, + }, + }, + authInfo: 
&testAuthInfo{typ: "tls"}, + wantedMetadata: md1, + }, + { + desc: "ALTSPerRPCCreds specified with alts channel", + defaultCredsOpts: DefaultCredentialsOptions{ + PerRPCCreds: &testPerRPCCreds{ + md: md1, + }, + ALTSPerRPCCreds: &testPerRPCCreds{ + md: md2, + }, + }, + authInfo: &testAuthInfo{typ: "alts"}, + wantedMetadata: md2, + }, + { + desc: "ALTSPerRPCCreds specified with unknown channel", + defaultCredsOpts: DefaultCredentialsOptions{ + PerRPCCreds: &testPerRPCCreds{ + md: md1, + }, + ALTSPerRPCCreds: &testPerRPCCreds{ + md: md2, + }, + }, + authInfo: &testAuthInfo{typ: "foo"}, + wantedMetadata: md1, + }, + } + for _, tc := range tests { + t.Run(tc.desc, func(t *testing.T) { + bundle := NewDefaultCredentialsWithOptions(tc.defaultCredsOpts) + ri := credentials.RequestInfo{AuthInfo: tc.authInfo} + ctx := icredentials.NewRequestInfoContext(context.Background(), ri) + got, err := bundle.PerRPCCredentials().GetRequestMetadata(ctx, "uri") + if err != nil { + t.Fatalf("Bundle's PerRPCCredentials().GetRequestMetadata() unexpected error = %v", err) + } + if diff := cmp.Diff(got, tc.wantedMetadata); diff != "" { + t.Errorf("Unexpected request metadata from bundle's PerRPCCredentials. Diff (-got +want):\n%v", diff) + } + }) + } +} From ada6787961b37252f830fb554022642dbf25933d Mon Sep 17 00:00:00 2001 From: janardhanvissa <47281167+janardhanvissa@users.noreply.github.com> Date: Mon, 28 Oct 2024 14:21:57 +0000 Subject: [PATCH 31/57] cleanup: switching to stubserver in tests instead of testservice implementation (#7708) --- ...ds_client_ignore_resource_deletion_test.go | 12 ++++- .../xds_server_certificate_providers_test.go | 22 +++++++-- test/xds/xds_server_integration_test.go | 46 +++++++++---------- test/xds/xds_server_serving_mode_test.go | 25 ++++++++-- test/xds/xds_server_test.go | 41 +++++++++++++++-- 5 files changed, 110 insertions(+), 36 deletions(-) diff --git a/test/xds/xds_client_ignore_resource_deletion_test.go b/test/xds/xds_client_ignore_resource_deletion_test.go index 6466d3311599..a8078cd206fb 100644 --- a/test/xds/xds_client_ignore_resource_deletion_test.go +++ b/test/xds/xds_client_ignore_resource_deletion_test.go @@ -309,12 +309,22 @@ func setupGRPCServerWithModeChangeChannelAndServe(t *testing.T, bootstrapContent t.Logf("Serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) updateCh <- args.Mode }) + stub := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + } server, err := xds.NewGRPCServer(grpc.Creds(insecure.NewCredentials()), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } t.Cleanup(server.Stop) - testgrpc.RegisterTestServiceServer(server, &testService{}) + + stub.S = server + stubserver.StartTestService(t, stub) // Serve. 
go func() { diff --git a/test/xds/xds_server_certificate_providers_test.go b/test/xds/xds_server_certificate_providers_test.go index 4fb06ce454f0..f277db1376ed 100644 --- a/test/xds/xds_server_certificate_providers_test.go +++ b/test/xds/xds_server_certificate_providers_test.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/internal/testutils/xds/e2e/setup" @@ -161,9 +162,12 @@ func (s) TestServerSideXDS_WithNoCertificateProvidersInBootstrap_Failure(t *test if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } - testgrpc.RegisterTestServiceServer(server, &testService{}) defer server.Stop() + stub := &stubserver.StubServer{} + stub.S = server + stubserver.StartTestService(t, stub) + // Create a local listener and pass it to Serve(). lis, err := testutils.LocalTCPListener() if err != nil { @@ -268,9 +272,9 @@ func (s) TestServerSideXDS_WithValidAndInvalidSecurityConfiguration(t *testing.T t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } - // Create an xDS-enabled grpc server that is configured to use xDS - // credentials, and register the test service on it. Configure a mode change - // option that closes a channel when listener2 enter serving mode. + // Create an xDS-enabled gRPC server that is configured to use xDS + // credentials and assigned to a stub server, configuring a mode change + // option that closes a channel when listener2 enters serving mode. creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) if err != nil { t.Fatal(err) @@ -283,13 +287,21 @@ func (s) TestServerSideXDS_WithValidAndInvalidSecurityConfiguration(t *testing.T } } }) + + stub := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } server, err := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } - testgrpc.RegisterTestServiceServer(server, &testService{}) defer server.Stop() + stub.S = server + stubserver.StartTestService(t, stub) + go func() { if err := server.Serve(lis1); err != nil { t.Errorf("Serve() failed: %v", err) diff --git a/test/xds/xds_server_integration_test.go b/test/xds/xds_server_integration_test.go index 347f069c53bd..eacc6463c395 100644 --- a/test/xds/xds_server_integration_test.go +++ b/test/xds/xds_server_integration_test.go @@ -34,6 +34,7 @@ import ( xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/internal/testutils/xds/e2e/setup" @@ -45,27 +46,6 @@ import ( testpb "google.golang.org/grpc/interop/grpc_testing" ) -type testService struct { - testgrpc.TestServiceServer -} - -func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil -} - -func (*testService) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{}, nil -} - -func 
(*testService) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { - for { - _, err := stream.Recv() // hangs here forever if stream doesn't shut down...doesn't receive EOF without any errors - if err == io.EOF { - return nil - } - } -} - func testModeChangeServerOption(t *testing.T) grpc.ServerOption { // Create a server option to get notified about serving mode changes. We don't // do anything other than throwing a log entry here. But this is required, @@ -112,12 +92,32 @@ func setupGRPCServer(t *testing.T, bootstrapContents []byte) (net.Listener, func t.Fatal(err) } - // Initialize an xDS-enabled gRPC server and register the stubServer on it. + // Initialize a test gRPC server, assign it to the stub server, and start + // the test service. + stub := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() // hangs here forever if stream doesn't shut down...doesn't receive EOF without any errors + if err == io.EOF { + return nil + } + } + }, + } + server, err := xds.NewGRPCServer(grpc.Creds(creds), testModeChangeServerOption(t), xds.BootstrapContentsForTesting(bootstrapContents)) if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } - testgrpc.RegisterTestServiceServer(server, &testService{}) + + stub.S = server + stubserver.StartTestService(t, stub) // Create a local listener and pass it to Serve(). lis, err := testutils.LocalTCPListener() diff --git a/test/xds/xds_server_serving_mode_test.go b/test/xds/xds_server_serving_mode_test.go index 40bc1f6898c0..3ed6750a6353 100644 --- a/test/xds/xds_server_serving_mode_test.go +++ b/test/xds/xds_server_serving_mode_test.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/internal/testutils/xds/e2e/setup" @@ -62,13 +63,21 @@ func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { updateCh <- args.Mode }) - // Initialize an xDS-enabled gRPC server and register the stubServer on it. + // Initialize a test gRPC server, assign it to the stub server, and start + // the test service. + stub := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } server, err := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } defer server.Stop() - testgrpc.RegisterTestServiceServer(server, &testService{}) + + stub.S = server + stubserver.StartTestService(t, stub) // Setup the management server to respond with the listener resources. host, port, err := hostPortFromListener(lis) @@ -206,13 +215,21 @@ func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { } }) - // Initialize an xDS-enabled gRPC server and register the stubServer on it. + // Initialize a test gRPC server, assign it to the stub server, and start + // the test service. 
+ stub := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } server, err := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } defer server.Stop() - testgrpc.RegisterTestServiceServer(server, &testService{}) + + stub.S = server + stubserver.StartTestService(t, stub) // Setup the management server to respond with server-side Listener // resources for both listeners. diff --git a/test/xds/xds_server_test.go b/test/xds/xds_server_test.go index 3ede7af3cb0e..6912757e5e13 100644 --- a/test/xds/xds_server_test.go +++ b/test/xds/xds_server_test.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/internal/testutils/xds/e2e/setup" @@ -93,12 +94,20 @@ func (s) TestServeLDSRDS(t *testing.T) { } }) + stub := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } server, err := xds.NewGRPCServer(grpc.Creds(insecure.NewCredentials()), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } defer server.Stop() - testgrpc.RegisterTestServiceServer(server, &testService{}) + + stub.S = server + stubserver.StartTestService(t, stub) + go func() { if err := server.Serve(lis); err != nil { t.Errorf("Serve() failed: %v", err) @@ -201,12 +210,20 @@ func (s) TestRDSNack(t *testing.T) { } }) + stub := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } server, err := xds.NewGRPCServer(grpc.Creds(insecure.NewCredentials()), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } defer server.Stop() - testgrpc.RegisterTestServiceServer(server, &testService{}) + + stub.S = server + stubserver.StartTestService(t, stub) + go func() { if err := server.Serve(lis); err != nil { t.Errorf("Serve() failed: %v", err) @@ -259,12 +276,30 @@ func (s) TestMultipleUpdatesImmediatelySwitch(t *testing.T) { if err := managementServer.Update(ctx, resources); err != nil { t.Fatal(err) } + + stub := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() // hangs here forever if stream doesn't shut down...doesn't receive EOF without any errors + if err == io.EOF { + return nil + } + } + }, + } + server, err := xds.NewGRPCServer(grpc.Creds(insecure.NewCredentials()), testModeChangeServerOption(t), xds.BootstrapContentsForTesting(bootstrapContents)) if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } defer server.Stop() - testgrpc.RegisterTestServiceServer(server, &testService{}) + + stub.S = server + stubserver.StartTestService(t, stub) + go func() { if err := server.Serve(lis); err != nil { t.Errorf("Serve() failed: %v", err) From 
4084b140b762f869ef10481cbef20bd4263cba5a Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 28 Oct 2024 16:47:49 -0400 Subject: [PATCH 32/57] stats/opentelemetry: Remove OpenTelemetry module and add RLS Metrics e2e tests (#7759) --- balancer/rls/metrics_test.go | 367 +++++++++++++++++++++++++++++++++++ examples/go.mod | 1 - examples/go.sum | 4 + gcp/observability/go.sum | 10 +- go.mod | 9 + go.sum | 19 ++ interop/observability/go.sum | 10 +- interop/xds/go.mod | 3 - stats/opencensus/go.sum | 13 ++ stats/opentelemetry/go.mod | 39 ---- stats/opentelemetry/go.sum | 67 ------- 11 files changed, 428 insertions(+), 114 deletions(-) create mode 100644 balancer/rls/metrics_test.go delete mode 100644 stats/opentelemetry/go.mod delete mode 100644 stats/opentelemetry/go.sum diff --git a/balancer/rls/metrics_test.go b/balancer/rls/metrics_test.go new file mode 100644 index 000000000000..ea7a9fd9cdf1 --- /dev/null +++ b/balancer/rls/metrics_test.go @@ -0,0 +1,367 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rls + +import ( + "context" + "math/rand" + "testing" + + "github.com/google/uuid" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/internal/stubserver" + rlstest "google.golang.org/grpc/internal/testutils/rls" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/stats/opentelemetry" +) + +func metricsDataFromReader(ctx context.Context, reader *metric.ManualReader) map[string]metricdata.Metrics { + rm := &metricdata.ResourceMetrics{} + reader.Collect(ctx, rm) + gotMetrics := map[string]metricdata.Metrics{} + for _, sm := range rm.ScopeMetrics { + for _, m := range sm.Metrics { + gotMetrics[m.Name] = m + } + } + return gotMetrics +} + +// TestRLSTargetPickMetric tests RLS Metrics in the case an RLS Balancer picks a +// target from an RLS Response for a RPC. This should emit a +// "grpc.lb.rls.target_picks" with certain labels and cache metrics with certain +// labels. +func (s) TestRLSTargetPickMetric(t *testing.T) { + // Overwrite the uuid random number generator to be deterministic. 
+ uuid.SetRand(rand.New(rand.NewSource(1))) + defer uuid.SetRand(nil) + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + backend := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + defer backend.Stop() + + rlsServer.SetResponseCallback(func(context.Context, *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backend.Address}}} + }) + r := startManualResolverWithConfig(t, rlsConfig) + reader := metric.NewManualReader() + provider := metric.NewMeterProvider(metric.WithReader(reader)) + mo := opentelemetry.MetricsOptions{ + MeterProvider: provider, + Metrics: opentelemetry.DefaultMetrics().Add("grpc.lb.rls.cache_entries", "grpc.lb.rls.cache_size", "grpc.lb.rls.default_target_picks", "grpc.lb.rls.target_picks", "grpc.lb.rls.failed_picks"), + } + grpcTarget := r.Scheme() + ":///" + cc, err := grpc.NewClient(grpcTarget, grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials()), opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})) + if err != nil { + t.Fatalf("Failed to dial local test server: %v", err) + } + defer cc.Close() + + wantMetrics := []metricdata.Metrics{ + { + Name: "grpc.lb.rls.target_picks", + Description: "EXPERIMENTAL. Number of LB picks sent to each RLS target. Note that if the default target is also returned by the RLS server, RPCs sent to that target from the cache will be counted in this metric, not in grpc.rls.default_target_picks.", + Unit: "pick", + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("grpc.target", grpcTarget), attribute.String("grpc.lb.rls.server_target", rlsServer.Address), attribute.String("grpc.lb.rls.data_plane_target", backend.Address), attribute.String("grpc.lb.pick_result", "complete")), + Value: 1, + }, + }, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + + // Receives an empty RLS Response, so a single cache entry with no size. + { + Name: "grpc.lb.rls.cache_entries", + Description: "EXPERIMENTAL. Number of entries in the RLS cache.", + Unit: "entry", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("grpc.target", grpcTarget), attribute.String("grpc.lb.rls.server_target", rlsServer.Address), attribute.String("grpc.lb.rls.instance_uuid", "52fdfc07-2182-454f-963f-5f0f9a621d72")), + Value: 1, + }, + }, + }, + }, + { + Name: "grpc.lb.rls.cache_size", + Description: "EXPERIMENTAL. 
The current size of the RLS cache.", + Unit: "By", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("grpc.target", grpcTarget), attribute.String("grpc.lb.rls.server_target", rlsServer.Address), attribute.String("grpc.lb.rls.instance_uuid", "52fdfc07-2182-454f-963f-5f0f9a621d72")), + Value: 35, + }, + }, + }, + }, + } + client := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err = client.EmptyCall(ctx, &testpb.Empty{}) + if err != nil { + t.Fatalf("client.EmptyCall failed with error: %v", err) + } + + gotMetrics := metricsDataFromReader(ctx, reader) + for _, metric := range wantMetrics { + val, ok := gotMetrics[metric.Name] + if !ok { + t.Fatalf("Metric %v not present in recorded metrics", metric.Name) + } + if !metricdatatest.AssertEqual(t, metric, val, metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreExemplars()) { + t.Fatalf("Metrics data type not equal for metric: %v", metric.Name) + } + } + + // Only one pick was made, which was a target pick, so no default target + // pick or failed pick metric should emit. + for _, metric := range []string{"grpc.lb.rls.default_target_picks", "grpc.lb.rls.failed_picks"} { + if _, ok := gotMetrics[metric]; ok { + t.Fatalf("Metric %v present in recorded metrics", metric) + } + } +} + +// TestRLSDefaultTargetPickMetric tests RLS Metrics in the case an RLS Balancer +// falls back to the default target for an RPC. This should emit a +// "grpc.lb.rls.default_target_picks" with certain labels and cache metrics with +// certain labels. +func (s) TestRLSDefaultTargetPickMetric(t *testing.T) { + // Overwrite the uuid random number generator to be deterministic. + uuid.SetRand(rand.New(rand.NewSource(1))) + defer uuid.SetRand(nil) + + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) + // Build RLS service config with a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + backend := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + defer backend.Stop() + rlsConfig.RouteLookupConfig.DefaultTarget = backend.Address + + r := startManualResolverWithConfig(t, rlsConfig) + reader := metric.NewManualReader() + provider := metric.NewMeterProvider(metric.WithReader(reader)) + mo := opentelemetry.MetricsOptions{ + MeterProvider: provider, + Metrics: opentelemetry.DefaultMetrics().Add("grpc.lb.rls.cache_entries", "grpc.lb.rls.cache_size", "grpc.lb.rls.default_target_picks", "grpc.lb.rls.target_picks", "grpc.lb.rls.failed_picks"), + } + grpcTarget := r.Scheme() + ":///" + cc, err := grpc.NewClient(grpcTarget, grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials()), opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})) + if err != nil { + t.Fatalf("Failed to dial local test server: %v", err) + } + defer cc.Close() + + wantMetrics := []metricdata.Metrics{ + { + Name: "grpc.lb.rls.default_target_picks", + Description: "EXPERIMENTAL. 
Number of LB picks sent to the default target.", + Unit: "pick", + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("grpc.target", grpcTarget), attribute.String("grpc.lb.rls.server_target", rlsServer.Address), attribute.String("grpc.lb.rls.data_plane_target", backend.Address), attribute.String("grpc.lb.pick_result", "complete")), + Value: 1, + }, + }, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + // Receives a RLS Response with target information, so a single cache + // entry with a certain size. + { + Name: "grpc.lb.rls.cache_entries", + Description: "EXPERIMENTAL. Number of entries in the RLS cache.", + Unit: "entry", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("grpc.target", grpcTarget), attribute.String("grpc.lb.rls.server_target", rlsServer.Address), attribute.String("grpc.lb.rls.instance_uuid", "52fdfc07-2182-454f-963f-5f0f9a621d72")), + Value: 1, + }, + }, + }, + }, + { + Name: "grpc.lb.rls.cache_size", + Description: "EXPERIMENTAL. The current size of the RLS cache.", + Unit: "By", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("grpc.target", grpcTarget), attribute.String("grpc.lb.rls.server_target", rlsServer.Address), attribute.String("grpc.lb.rls.instance_uuid", "52fdfc07-2182-454f-963f-5f0f9a621d72")), + Value: 0, + }, + }, + }, + }, + } + client := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err = client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("client.EmptyCall failed with error: %v", err) + } + + gotMetrics := metricsDataFromReader(ctx, reader) + for _, metric := range wantMetrics { + val, ok := gotMetrics[metric.Name] + if !ok { + t.Fatalf("Metric %v not present in recorded metrics", metric.Name) + } + if !metricdatatest.AssertEqual(t, metric, val, metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreExemplars()) { + t.Fatalf("Metrics data type not equal for metric: %v", metric.Name) + } + } + // No target picks and failed pick metrics should be emitted, as the test + // made only one RPC which recorded as a default target pick. + for _, metric := range []string{"grpc.lb.rls.target_picks", "grpc.lb.rls.failed_picks"} { + if _, ok := gotMetrics[metric]; ok { + t.Fatalf("Metric %v present in recorded metrics", metric) + } + } +} + +// TestRLSFailedRPCMetric tests RLS Metrics in the case an RLS Balancer fails an +// RPC due to an RLS failure. This should emit a +// "grpc.lb.rls.default_target_picks" with certain labels and cache metrics with +// certain labels. +func (s) TestRLSFailedRPCMetric(t *testing.T) { + // Overwrite the uuid random number generator to be deterministic. + uuid.SetRand(rand.New(rand.NewSource(1))) + defer uuid.SetRand(nil) + + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) + // Build an RLS config without a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + // Register a manual resolver and push the RLS service config through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + reader := metric.NewManualReader() + provider := metric.NewMeterProvider(metric.WithReader(reader)) + mo := opentelemetry.MetricsOptions{ + MeterProvider: provider, + Metrics: opentelemetry.DefaultMetrics().Add("grpc.lb.rls.cache_entries", "grpc.lb.rls.cache_size", "grpc.lb.rls.default_target_picks", "grpc.lb.rls.target_picks", "grpc.lb.rls.failed_picks"), + } + grpcTarget := r.Scheme() + ":///" + cc, err := grpc.NewClient(grpcTarget, grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials()), opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})) + if err != nil { + t.Fatalf("Failed to dial local test server: %v", err) + } + defer cc.Close() + + wantMetrics := []metricdata.Metrics{ + { + Name: "grpc.lb.rls.failed_picks", + Description: "EXPERIMENTAL. Number of LB picks failed due to either a failed RLS request or the RLS channel being throttled.", + Unit: "pick", + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("grpc.target", grpcTarget), attribute.String("grpc.lb.rls.server_target", rlsServer.Address)), + Value: 1, + }, + }, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + // Receives an empty RLS Response, so a single cache entry with no size. + { + Name: "grpc.lb.rls.cache_entries", + Description: "EXPERIMENTAL. Number of entries in the RLS cache.", + Unit: "entry", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("grpc.target", grpcTarget), attribute.String("grpc.lb.rls.server_target", rlsServer.Address), attribute.String("grpc.lb.rls.instance_uuid", "52fdfc07-2182-454f-963f-5f0f9a621d72")), + Value: 1, + }, + }, + }, + }, + { + Name: "grpc.lb.rls.cache_size", + Description: "EXPERIMENTAL. The current size of the RLS cache.", + Unit: "By", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("grpc.target", grpcTarget), attribute.String("grpc.lb.rls.server_target", rlsServer.Address), attribute.String("grpc.lb.rls.instance_uuid", "52fdfc07-2182-454f-963f-5f0f9a621d72")), + Value: 0, + }, + }, + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + if _, err = client.EmptyCall(ctx, &testpb.Empty{}); err == nil { + t.Fatalf("client.EmptyCall error = %v, expected a non nil error", err) + } + + gotMetrics := metricsDataFromReader(ctx, reader) + for _, metric := range wantMetrics { + val, ok := gotMetrics[metric.Name] + if !ok { + t.Fatalf("Metric %v not present in recorded metrics", metric.Name) + } + if !metricdatatest.AssertEqual(t, metric, val, metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreExemplars()) { + t.Fatalf("Metrics data type not equal for metric: %v", metric.Name) + } + } + // Only one RPC was made, which was a failed pick due to an RLS failure, so + // no metrics for target picks or default target picks should have emitted. 
+ for _, metric := range []string{"grpc.lb.rls.target_picks", "grpc.lb.rls.default_target_picks"} { + if _, ok := gotMetrics[metric]; ok { + t.Fatalf("Metric %v present in recorded metrics", metric) + } + } +} diff --git a/examples/go.mod b/examples/go.mod index 0dd53bee1cc1..52451c857af7 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -12,7 +12,6 @@ require ( google.golang.org/grpc v1.67.1 google.golang.org/grpc/gcp/observability v1.0.1 google.golang.org/grpc/security/advancedtls v1.0.0 - google.golang.org/grpc/stats/opentelemetry v0.0.0-20241017035653-830135e6c5a3 google.golang.org/protobuf v1.35.1 ) diff --git a/examples/go.sum b/examples/go.sum index 182b57b230cc..5bfb0231ed13 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -220,6 +220,7 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= @@ -776,6 +777,7 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 h1:cb3br57K508pQEFgBxn9GDhPS9HefpyMPK1RzmtMNzk= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= @@ -1078,6 +1080,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -1433,6 +1436,7 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys 
v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum index a328c2053c70..30e984fb4343 100644 --- a/gcp/observability/go.sum +++ b/gcp/observability/go.sum @@ -219,6 +219,7 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= @@ -775,6 +776,7 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= @@ -1054,6 +1056,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -1093,6 +1096,7 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= @@ -1101,8 +1105,9 @@ go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -1403,6 +1408,7 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/go.mod b/go.mod index 80f755087ca0..91008eeb7a42 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,11 @@ require ( github.com/golang/protobuf v1.5.4 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.6.0 + go.opentelemetry.io/contrib/detectors/gcp v1.31.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/metric v1.31.0 + go.opentelemetry.io/otel/sdk v1.31.0 + go.opentelemetry.io/otel/sdk/metric v1.31.0 golang.org/x/net v0.30.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 @@ -21,9 +26,13 @@ require ( require ( cel.dev/expr v0.16.2 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect golang.org/x/text v0.19.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect ) diff --git a/go.sum b/go.sum index 8ed7162313b9..fe263da6d774 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ cel.dev/expr v0.16.2 
h1:RwRhoH17VhAu9U5CMvMhH1PDVgf0tuz9FT+24AfMLfU= cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2 h1:cZpsGsWTIFKymTA0je7IIvi1O7Es7apb9CF3EQlOcfE= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -14,6 +16,11 @@ github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/ github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -28,6 +35,18 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod 
h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= diff --git a/interop/observability/go.sum b/interop/observability/go.sum index fc68e26e9399..a749b30fb223 100644 --- a/interop/observability/go.sum +++ b/interop/observability/go.sum @@ -219,6 +219,7 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= @@ -775,6 +776,7 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= @@ -1056,6 +1058,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -1095,6 +1098,7 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= @@ -1103,8 +1107,9 @@ go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -1405,6 +1410,7 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/interop/xds/go.mod b/interop/xds/go.mod index d4052ef96dd4..d3fa9e1b1547 100644 --- a/interop/xds/go.mod +++ b/interop/xds/go.mod @@ -4,14 +4,11 @@ go 1.22.7 replace google.golang.org/grpc => ../.. 
-replace google.golang.org/grpc/stats/opentelemetry => ../../stats/opentelemetry - require ( github.com/prometheus/client_golang v1.20.5 go.opentelemetry.io/otel/exporters/prometheus v0.53.0 go.opentelemetry.io/otel/sdk/metric v1.31.0 google.golang.org/grpc v1.67.1 - google.golang.org/grpc/stats/opentelemetry v0.0.0-20241017035653-830135e6c5a3 ) require ( diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum index f545f284c333..447a12f3eb15 100644 --- a/stats/opencensus/go.sum +++ b/stats/opencensus/go.sum @@ -213,6 +213,7 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= @@ -756,6 +757,7 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= @@ -818,6 +820,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= @@ -992,6 +997,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod 
h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -1030,6 +1036,12 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1325,6 +1337,7 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/stats/opentelemetry/go.mod b/stats/opentelemetry/go.mod deleted file mode 100644 index 61e31b4ce343..000000000000 --- a/stats/opentelemetry/go.mod +++ /dev/null @@ -1,39 +0,0 @@ -module google.golang.org/grpc/stats/opentelemetry - -go 1.22.7 - -replace google.golang.org/grpc => ../.. 
- -require ( - github.com/envoyproxy/go-control-plane v0.13.1 - github.com/google/go-cmp v0.6.0 - go.opentelemetry.io/contrib/detectors/gcp v1.31.0 - go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/metric v1.31.0 - go.opentelemetry.io/otel/sdk v1.31.0 - go.opentelemetry.io/otel/sdk/metric v1.31.0 - google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.35.1 -) - -require ( - cel.dev/expr v0.16.2 // indirect - cloud.google.com/go/compute/metadata v0.5.2 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 // indirect - github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect - github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - go.opentelemetry.io/otel/trace v1.31.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect -) diff --git a/stats/opentelemetry/go.sum b/stats/opentelemetry/go.sum deleted file mode 100644 index b2d047c8cb0f..000000000000 --- a/stats/opentelemetry/go.sum +++ /dev/null @@ -1,67 +0,0 @@ -cel.dev/expr v0.16.2 h1:RwRhoH17VhAu9U5CMvMhH1PDVgf0tuz9FT+24AfMLfU= -cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 h1:cb3br57K508pQEFgBxn9GDhPS9HefpyMPK1RzmtMNzk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= -github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= -github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= -github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= -github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= -github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= -github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= -github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA= -go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= 
-google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From e7435d60590e96dcc7e7fcf01a5ee7aa844d0f7f Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:00:14 -0400 Subject: [PATCH 33/57] balancer/endpointsharding: Ignore empty endpoints (#7674) --- balancer/endpointsharding/endpointsharding.go | 19 ++--- resolver/resolver.go | 18 +++++ resolver/resolver_test.go | 72 +++++++++++++++++++ 3 files changed, 95 insertions(+), 14 deletions(-) diff --git a/balancer/endpointsharding/endpointsharding.go b/balancer/endpointsharding/endpointsharding.go index 8f28da75c6bc..8425ddea56d0 100644 --- a/balancer/endpointsharding/endpointsharding.go +++ b/balancer/endpointsharding/endpointsharding.go @@ -28,7 +28,6 @@ package endpointsharding import ( "encoding/json" "errors" - "fmt" "math/rand" "sync" "sync/atomic" @@ -80,20 +79,9 @@ type endpointSharding struct { // for endpoints that are no longer present. It also updates all the children, // and sends a single synchronous update of the childrens' aggregated state at // the end of the UpdateClientConnState operation. If any endpoint has no -// addresses, returns error without forwarding any updates. Otherwise returns -// first error found from a child, but fully processes the new update. +// addresses it will ignore that endpoint. Otherwise, returns first error found +// from a child, but fully processes the new update. func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error { - if len(state.ResolverState.Endpoints) == 0 { - return errors.New("endpoints list is empty") - } - // Check/return early if any endpoints have no addresses. - // TODO: make this configurable if needed. - for i, endpoint := range state.ResolverState.Endpoints { - if len(endpoint.Addresses) == 0 { - return fmt.Errorf("endpoint %d has empty addresses", i) - } - } - es.inhibitChildUpdates.Store(true) defer func() { es.inhibitChildUpdates.Store(false) @@ -106,6 +94,9 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState // Update/Create new children. for _, endpoint := range state.ResolverState.Endpoints { + if len(endpoint.Addresses) == 0 { + continue + } if _, ok := newChildren.Get(endpoint); ok { // Endpoint child was already created, continue to avoid duplicate // update. diff --git a/resolver/resolver.go b/resolver/resolver.go index 96294ac3f444..8eb1cf3bcfaf 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -22,6 +22,7 @@ package resolver import ( "context" + "errors" "fmt" "net" "net/url" @@ -330,3 +331,20 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. 
OverrideAuthority(Target) string } + +// ValidateEndpoints validates endpoints from a petiole policy's perspective. +// Petiole policies should call this before calling into their children. See +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) +// for details. +func ValidateEndpoints(endpoints []Endpoint) error { + if len(endpoints) == 0 { + return errors.New("endpoints list is empty") + } + + for _, endpoint := range endpoints { + for range endpoint.Addresses { + return nil + } + } + return errors.New("endpoints list contains no addresses") +} diff --git a/resolver/resolver_test.go b/resolver/resolver_test.go index 8d061f9b66d2..33d2e27feb96 100644 --- a/resolver/resolver_test.go +++ b/resolver/resolver_test.go @@ -31,3 +31,75 @@ type s struct { func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } + +// TestValidateEndpoints tests different scenarios of resolver addresses being +// validated by the ValidateEndpoint helper. +func (s) TestValidateEndpoints(t *testing.T) { + addr1 := Address{Addr: "addr1"} + addr2 := Address{Addr: "addr2"} + addr3 := Address{Addr: "addr3"} + addr4 := Address{Addr: "addr4"} + tests := []struct { + name string + endpoints []Endpoint + wantErr bool + }{ + { + name: "duplicate-address-across-endpoints", + endpoints: []Endpoint{ + {Addresses: []Address{addr1}}, + {Addresses: []Address{addr1}}, + }, + wantErr: false, + }, + { + name: "duplicate-address-same-endpoint", + endpoints: []Endpoint{ + {Addresses: []Address{addr1, addr1}}, + }, + wantErr: false, + }, + { + name: "duplicate-address-across-endpoints-plural-addresses", + endpoints: []Endpoint{ + {Addresses: []Address{addr1, addr2, addr3}}, + {Addresses: []Address{addr3, addr4}}, + }, + wantErr: false, + }, + { + name: "no-shared-addresses", + endpoints: []Endpoint{ + {Addresses: []Address{addr1, addr2}}, + {Addresses: []Address{addr3, addr4}}, + }, + wantErr: false, + }, + { + name: "endpoint-with-no-addresses", + endpoints: []Endpoint{ + {Addresses: []Address{addr1, addr2}}, + {Addresses: []Address{}}, + }, + wantErr: false, + }, + { + name: "empty-endpoints-list", + endpoints: []Endpoint{}, + wantErr: true, + }, + { + name: "endpoint-list-with-no-addresses", + endpoints: []Endpoint{{}, {}}, + wantErr: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := ValidateEndpoints(test.endpoints) + if (err != nil) != test.wantErr { + t.Fatalf("ValidateEndpoints() wantErr: %v, got: %v", test.wantErr, err) + } + }) + } +} From 192ee33f6fc0f07070eeaaa1d34e41746740e64c Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 28 Oct 2024 14:58:42 -0700 Subject: [PATCH 34/57] multiple: add verbosity checks to logs that use pretty.JSON (#7785) --- balancer/rls/config.go | 5 ++++- balancer/rls/control_channel.go | 4 +++- balancer/weightedtarget/weightedtarget.go | 5 ++++- xds/internal/balancer/clustermanager/clustermanager.go | 5 ++++- xds/internal/balancer/clusterresolver/clusterresolver.go | 5 ++++- xds/internal/balancer/ringhash/ringhash.go | 5 ++++- 6 files changed, 23 insertions(+), 6 deletions(-) diff --git a/balancer/rls/config.go b/balancer/rls/config.go index 439581c78bc1..ff540aa058b8 100644 --- a/balancer/rls/config.go +++ b/balancer/rls/config.go @@ -143,7 +143,10 @@ type lbConfigJSON struct { // - childPolicyConfigTargetFieldName: // - must be set and non-empty func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) 
+ if logger.V(2) { + logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) + } + cfgJSON := &lbConfigJSON{} if err := json.Unmarshal(c, cfgJSON); err != nil { return nil, fmt.Errorf("rls: json unmarshal failed for service config %+v: %v", string(c), err) diff --git a/balancer/rls/control_channel.go b/balancer/rls/control_channel.go index 4acc11d90e94..f2ad8bc720e4 100644 --- a/balancer/rls/control_channel.go +++ b/balancer/rls/control_channel.go @@ -209,7 +209,9 @@ func (cc *controlChannel) lookup(reqKeys map[string]string, reason rlspb.RouteLo Reason: reason, StaleHeaderData: staleHeaders, } - cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) + if cc.logger.V(2) { + cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) + } ctx, cancel := context.WithTimeout(context.Background(), cc.rpcTimeout) defer cancel() diff --git a/balancer/weightedtarget/weightedtarget.go b/balancer/weightedtarget/weightedtarget.go index dfd1ef26dcb0..c004e112c40a 100644 --- a/balancer/weightedtarget/weightedtarget.go +++ b/balancer/weightedtarget/weightedtarget.go @@ -99,7 +99,10 @@ func LocalityFromResolverState(state resolver.State) string { // creates/deletes sub-balancers and sends them update. addresses are split into // groups based on hierarchy path. func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + if b.logger.V(2) { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + } + newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index e6d751ecbee4..ef5b34ea4451 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -167,11 +167,14 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er } func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { + if b.logger.V(2) { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + } + newConfig, ok := s.BalancerConfig.(*lbConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } - b.logger.Infof("Update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) b.stateAggregator.pauseStateUpdates() defer b.stateAggregator.resumeStateUpdates() diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index 3f0c54e8f389..3b996989689e 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -184,7 +184,10 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { return } - b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) + if b.logger.V(2) { + b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) + } + cfg, _ := update.state.BalancerConfig.(*LBConfig) if cfg == nil { b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig) diff --git a/xds/internal/balancer/ringhash/ringhash.go 
b/xds/internal/balancer/ringhash/ringhash.go index ef054d48aa4e..8c44f19c3b14 100644 --- a/xds/internal/balancer/ringhash/ringhash.go +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -285,7 +285,10 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { } func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + if b.logger.V(2) { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + } + newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) From 091d20bfe2324518dc8f3e15b0f2e54ce9124718 Mon Sep 17 00:00:00 2001 From: Evan Jones Date: Tue, 29 Oct 2024 12:07:25 -0400 Subject: [PATCH 35/57] server: Only call FromIncomingContext with stats handlers (#7781) --- server.go | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/server.go b/server.go index d1e1415a40f9..62a5b04b76e2 100644 --- a/server.go +++ b/server.go @@ -1783,17 +1783,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) + // FromIncomingContext is expensive: skip if there are no statsHandlers + if len(s.opts.statsHandlers) > 0 { + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. 
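The server.go change in PATCH 35 above illustrates a general pattern: skip building expensive per-request data (here, copying the incoming metadata out of the context) unless at least one consumer is registered. A minimal, self-contained sketch of that guard follows; the names (observer, buildRequestInfo, handleStream) are hypothetical stand-ins, not part of the grpc-go server internals.

	package main

	import "fmt"

	// observer stands in for a stats handler: something that wants a
	// fully materialized view of the request.
	type observer interface {
		handle(info map[string]string)
	}

	type printObserver struct{}

	func (printObserver) handle(info map[string]string) { fmt.Println(info) }

	// buildRequestInfo stands in for metadata.FromIncomingContext: it is
	// assumed to be comparatively expensive (copies and allocations).
	func buildRequestInfo() map[string]string {
		return map[string]string{"method": "/test.Service/Call"}
	}

	func handleStream(observers []observer) {
		// Only pay for buildRequestInfo when someone will consume it.
		if len(observers) > 0 {
			info := buildRequestInfo()
			for _, o := range observers {
				o.handle(info)
			}
		}
		// The rest of request handling proceeds without the copy.
	}

	func main() {
		handleStream(nil)                         // fast path: no copy made
		handleStream([]observer{printObserver{}}) // slow path: info built once
	}

The design choice mirrors the patch: the cost is only incurred on the (less common) path where stats handlers are configured, and the fast path stays allocation-free.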
From 6fd86d35ba07a0419b9b8e397858298985f3ae7a Mon Sep 17 00:00:00 2001 From: Abhishek Ranjan <159750762+aranjans@users.noreply.github.com> Date: Tue, 29 Oct 2024 21:39:04 +0530 Subject: [PATCH 36/57] Disable buffer_pooling tests (#7762) Co-authored-by: Doug Fawley --- .github/workflows/testing.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index c5b7dd461773..43717dbf01e5 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -55,10 +55,6 @@ jobs: goversion: '1.23' testflags: -race - - type: tests - goversion: '1.23' - testflags: '-race -tags=buffer_pooling' - - type: tests goversion: '1.23' goarch: 386 From 52d7f6af60d427c1588e8223b07318795caace84 Mon Sep 17 00:00:00 2001 From: Marco Ferrer <35935108+marcoferrer@users.noreply.github.com> Date: Tue, 29 Oct 2024 13:43:58 -0400 Subject: [PATCH 37/57] multiple: switch to math/rand/v2 (#7711) Co-authored-by: Arvind Bright --- balancer/endpointsharding/endpointsharding.go | 5 +++-- balancer/grpclb/grpclb_picker.go | 6 +++--- balancer/leastrequest/leastrequest.go | 2 +- balancer/pickfirst/pickfirst.go | 2 +- balancer/rls/internal/adaptive/adaptive.go | 6 +++--- balancer/roundrobin/roundrobin.go | 4 ++-- balancer/weightedroundrobin/balancer.go | 2 +- benchmark/benchmain/main.go | 6 +++--- benchmark/benchmark.go | 4 ++-- benchmark/stats/curve.go | 4 ++-- benchmark/worker/benchmark_client.go | 2 +- examples/features/debugging/server/main.go | 4 ++-- examples/features/xds/server/main.go | 2 +- examples/route_guide/client/client.go | 13 ++++++------- internal/backoff/backoff.go | 2 +- internal/resolver/dns/dns_resolver.go | 4 ++-- internal/serviceconfig/duration_test.go | 2 +- internal/transport/http2_server.go | 4 ++-- internal/wrr/random.go | 8 ++++---- internal/wrr/wrr_test.go | 8 ++++---- interop/stress/client/main.go | 4 ++-- stream.go | 4 ++-- xds/googledirectpath/googlec2p.go | 2 +- xds/internal/balancer/outlierdetection/balancer.go | 6 +++--- .../balancer/ringhash/e2e/ringhash_balancer_test.go | 2 +- xds/internal/httpfilter/fault/fault.go | 4 ++-- xds/internal/httpfilter/fault/fault_test.go | 4 ++-- xds/internal/resolver/serviceconfig.go | 2 +- xds/internal/resolver/xds_resolver.go | 2 +- xds/internal/xdsclient/xdsresource/matcher.go | 8 ++++---- xds/internal/xdsclient/xdsresource/matcher_test.go | 10 +++++----- 31 files changed, 69 insertions(+), 69 deletions(-) diff --git a/balancer/endpointsharding/endpointsharding.go b/balancer/endpointsharding/endpointsharding.go index 8425ddea56d0..9238d3278204 100644 --- a/balancer/endpointsharding/endpointsharding.go +++ b/balancer/endpointsharding/endpointsharding.go @@ -28,10 +28,11 @@ package endpointsharding import ( "encoding/json" "errors" - "math/rand" "sync" "sync/atomic" + rand "math/rand/v2" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" @@ -225,7 +226,7 @@ func (es *endpointSharding) updateState() { p := &pickerWithChildStates{ pickers: pickers, childStates: childStates, - next: uint32(rand.Intn(len(pickers))), + next: uint32(rand.IntN(len(pickers))), } es.cc.UpdateState(balancer.State{ ConnectivityState: aggState, diff --git a/balancer/grpclb/grpclb_picker.go b/balancer/grpclb/grpclb_picker.go index 671bc663fcb0..9ff07522d786 100644 --- a/balancer/grpclb/grpclb_picker.go +++ b/balancer/grpclb/grpclb_picker.go @@ -19,7 +19,7 @@ package grpclb import ( - "math/rand" + rand "math/rand/v2" "sync" "sync/atomic" @@ -112,7 +112,7 @@ 
type rrPicker struct { func newRRPicker(readySCs []balancer.SubConn) *rrPicker { return &rrPicker{ subConns: readySCs, - subConnsNext: rand.Intn(len(readySCs)), + subConnsNext: rand.IntN(len(readySCs)), } } @@ -147,7 +147,7 @@ func newLBPicker(serverList []*lbpb.Server, readySCs []balancer.SubConn, stats * return &lbPicker{ serverList: serverList, subConns: readySCs, - subConnsNext: rand.Intn(len(readySCs)), + subConnsNext: rand.IntN(len(readySCs)), stats: stats, } } diff --git a/balancer/leastrequest/leastrequest.go b/balancer/leastrequest/leastrequest.go index ddd9bd269bf4..6dede1a40b70 100644 --- a/balancer/leastrequest/leastrequest.go +++ b/balancer/leastrequest/leastrequest.go @@ -22,7 +22,7 @@ package leastrequest import ( "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "sync/atomic" "google.golang.org/grpc/balancer" diff --git a/balancer/pickfirst/pickfirst.go b/balancer/pickfirst/pickfirst.go index e069346a7565..ea8899818c22 100644 --- a/balancer/pickfirst/pickfirst.go +++ b/balancer/pickfirst/pickfirst.go @@ -23,7 +23,7 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" diff --git a/balancer/rls/internal/adaptive/adaptive.go b/balancer/rls/internal/adaptive/adaptive.go index 8b1786043486..6249948ede72 100644 --- a/balancer/rls/internal/adaptive/adaptive.go +++ b/balancer/rls/internal/adaptive/adaptive.go @@ -20,15 +20,15 @@ package adaptive import ( - "math/rand" + rand "math/rand/v2" "sync" "time" ) // For overriding in unittests. var ( - timeNowFunc = func() time.Time { return time.Now() } - randFunc = func() float64 { return rand.Float64() } + timeNowFunc = time.Now + randFunc = rand.Float64 ) const ( diff --git a/balancer/roundrobin/roundrobin.go b/balancer/roundrobin/roundrobin.go index 260255d31b6a..80a42d22510c 100644 --- a/balancer/roundrobin/roundrobin.go +++ b/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "math/rand" + rand "math/rand/v2" "sync/atomic" "google.golang.org/grpc/balancer" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. 
- next: uint32(rand.Intn(len(scs))), + next: uint32(rand.IntN(len(scs))), } } diff --git a/balancer/weightedroundrobin/balancer.go b/balancer/weightedroundrobin/balancer.go index 1ea9eba4c894..a0511772d2fa 100644 --- a/balancer/weightedroundrobin/balancer.go +++ b/balancer/weightedroundrobin/balancer.go @@ -23,7 +23,7 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "sync" "sync/atomic" "time" diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index 79004f42cba1..a298108786e9 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -47,7 +47,7 @@ import ( "fmt" "io" "log" - "math/rand" + rand "math/rand/v2" "net" "os" "reflect" @@ -283,7 +283,7 @@ func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Fea defer wg.Done() for { if maxSleep > 0 { - time.Sleep(time.Duration(rand.Intn(maxSleep))) + time.Sleep(time.Duration(rand.IntN(maxSleep))) } t := time.Now() if t.After(bmEnd) { @@ -574,7 +574,7 @@ func runBenchmark(caller rpcCallFunc, start startFunc, stop stopFunc, bf stats.F defer wg.Done() for { if maxSleep > 0 { - time.Sleep(time.Duration(rand.Intn(maxSleep))) + time.Sleep(time.Duration(rand.IntN(maxSleep))) } t := time.Now() if t.After(bmEnd) { diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index 07c8c9db6651..0d4558f4ec80 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -26,7 +26,7 @@ import ( "fmt" "io" "log" - "math/rand" + rand "math/rand/v2" "net" "strconv" "time" @@ -187,7 +187,7 @@ func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService go func() { for { if maxSleep > 0 { - time.Sleep(time.Duration(rand.Intn(maxSleep))) + time.Sleep(time.Duration(rand.IntN(maxSleep))) } var err error if preloadMsgSize > 0 { diff --git a/benchmark/stats/curve.go b/benchmark/stats/curve.go index 124183dac2ea..801403169d19 100644 --- a/benchmark/stats/curve.go +++ b/benchmark/stats/curve.go @@ -24,7 +24,7 @@ import ( "encoding/hex" "fmt" "math" - "math/rand" + rand "math/rand/v2" "os" "sort" "strconv" @@ -74,7 +74,7 @@ func (pcr *payloadCurveRange) chooseRandom() int { return int(pcr.from) } - return int(rand.Int31n(pcr.to-pcr.from+1) + pcr.from) + return int(rand.Int32N(pcr.to-pcr.from+1) + pcr.from) } // sha256file is a helper function that returns a hex string matching the diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index d3d04cd012c3..45f5b2e9ec37 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -22,7 +22,7 @@ import ( "context" "flag" "math" - "math/rand" + rand "math/rand/v2" "runtime" "sync" "time" diff --git a/examples/features/debugging/server/main.go b/examples/features/debugging/server/main.go index 5dd31af9cedc..21c15e8718fb 100644 --- a/examples/features/debugging/server/main.go +++ b/examples/features/debugging/server/main.go @@ -23,7 +23,7 @@ package main import ( "context" "log" - "math/rand" + rand "math/rand/v2" "net" "time" @@ -55,7 +55,7 @@ type slowServer struct { // SayHello implements helloworld.GreeterServer func (s *slowServer) SayHello(_ context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { // Delay 100ms ~ 200ms before replying - time.Sleep(time.Duration(100+rand.Intn(100)) * time.Millisecond) + time.Sleep(time.Duration(100+rand.IntN(100)) * time.Millisecond) return &pb.HelloReply{Message: "Hello " + in.Name}, nil } diff --git a/examples/features/xds/server/main.go b/examples/features/xds/server/main.go index 
9f08e69932f7..c4a276844822 100644 --- a/examples/features/xds/server/main.go +++ b/examples/features/xds/server/main.go @@ -25,7 +25,7 @@ import ( "flag" "fmt" "log" - "math/rand" + rand "math/rand/v2" "net" "os" diff --git a/examples/route_guide/client/client.go b/examples/route_guide/client/client.go index 49c80932f96d..57f868af2e35 100644 --- a/examples/route_guide/client/client.go +++ b/examples/route_guide/client/client.go @@ -27,7 +27,7 @@ import ( "flag" "io" "log" - "math/rand" + rand "math/rand/v2" "time" "google.golang.org/grpc" @@ -81,11 +81,10 @@ func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { // runRecordRoute sends a sequence of points to server and expects to get a RouteSummary from server. func runRecordRoute(client pb.RouteGuideClient) { // Create a random number of random points - r := rand.New(rand.NewSource(time.Now().UnixNano())) - pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points + pointCount := int(rand.Int32N(100)) + 2 // Traverse at least two points var points []*pb.Point for i := 0; i < pointCount; i++ { - points = append(points, randomPoint(r)) + points = append(points, randomPoint()) } log.Printf("Traversing %d points.", len(points)) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -146,9 +145,9 @@ func runRouteChat(client pb.RouteGuideClient) { <-waitc } -func randomPoint(r *rand.Rand) *pb.Point { - lat := (r.Int31n(180) - 90) * 1e7 - long := (r.Int31n(360) - 180) * 1e7 +func randomPoint() *pb.Point { + lat := (rand.Int32N(180) - 90) * 1e7 + long := (rand.Int32N(360) - 180) * 1e7 return &pb.Point{Latitude: lat, Longitude: long} } diff --git a/internal/backoff/backoff.go b/internal/backoff/backoff.go index b15cf482d292..b6ae7f258505 100644 --- a/internal/backoff/backoff.go +++ b/internal/backoff/backoff.go @@ -25,7 +25,7 @@ package backoff import ( "context" "errors" - "math/rand" + rand "math/rand/v2" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index 8691698ef223..cc5d5e05c010 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -24,7 +24,7 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net" "os" "strconv" @@ -425,7 +425,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.Intn(100)+1 <= *a + return rand.IntN(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/internal/serviceconfig/duration_test.go b/internal/serviceconfig/duration_test.go index b03a4508b4ea..82c0dcdce2a8 100644 --- a/internal/serviceconfig/duration_test.go +++ b/internal/serviceconfig/duration_test.go @@ -21,7 +21,7 @@ package serviceconfig import ( "fmt" "math" - "math/rand" + rand "math/rand/v2" "strings" "testing" "time" diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 584b50fe5530..279cd5ccb1b4 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - "math/rand" + rand "math/rand/v2" "net" "net/http" "strconv" @@ -1455,7 +1455,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. 
r := int64(v / 10) - j := rand.Int63n(2*r) - r + j := rand.Int64N(2*r) - r return time.Duration(j) } diff --git a/internal/wrr/random.go b/internal/wrr/random.go index 3f611a35059a..0913ed676493 100644 --- a/internal/wrr/random.go +++ b/internal/wrr/random.go @@ -19,7 +19,7 @@ package wrr import ( "fmt" - "math/rand" + rand "math/rand/v2" "sort" ) @@ -46,19 +46,19 @@ func NewRandom() WRR { return &randomWRR{} } -var randInt63n = rand.Int63n +var randInt64n = rand.Int64N func (rw *randomWRR) Next() (item any) { if len(rw.items) == 0 { return nil } if rw.equalWeights { - return rw.items[randInt63n(int64(len(rw.items)))].item + return rw.items[randInt64n(int64(len(rw.items)))].item } sumOfWeights := rw.items[len(rw.items)-1].accumulatedWeight // Random number in [0, sumOfWeights). - randomWeight := randInt63n(sumOfWeights) + randomWeight := randInt64n(sumOfWeights) // Item's accumulated weights are in ascending order, because item's weight >= 0. // Binary search rw.items to find first item whose accumulatedWeight > randomWeight // The return i is guaranteed to be in range [0, len(rw.items)) because randomWeight < last item's accumulatedWeight diff --git a/internal/wrr/wrr_test.go b/internal/wrr/wrr_test.go index 7ede1fff902e..4a7d81eb1f95 100644 --- a/internal/wrr/wrr_test.go +++ b/internal/wrr/wrr_test.go @@ -20,7 +20,7 @@ package wrr import ( "errors" "math" - "math/rand" + rand "math/rand/v2" "strconv" "testing" @@ -146,7 +146,7 @@ func BenchmarkRandomWRRNext(b *testing.B) { w := NewRandom() var sumOfWeights int64 for i := 0; i < n; i++ { - weight := rand.Int63n(maxWeight + 1) + weight := rand.Int64N(maxWeight + 1) w.Add(i, weight) sumOfWeights += weight } @@ -188,6 +188,6 @@ func BenchmarkRandomWRRNext(b *testing.B) { } func init() { - r := rand.New(rand.NewSource(0)) - randInt63n = r.Int63n + r := rand.New(rand.NewPCG(0, 0)) + randInt64n = r.Int64N } diff --git a/interop/stress/client/main.go b/interop/stress/client/main.go index 2defe15d9cb1..8b22716bf0d4 100644 --- a/interop/stress/client/main.go +++ b/interop/stress/client/main.go @@ -23,7 +23,7 @@ import ( "context" "flag" "fmt" - "math/rand" + rand "math/rand/v2" "net" "os" "strconv" @@ -130,7 +130,7 @@ func newWeightedRandomTestSelector(tests []testCaseWithWeight) *weightedRandomTe } func (selector weightedRandomTestSelector) getNextTest() string { - random := rand.Intn(selector.totalWeight) + random := rand.IntN(selector.totalWeight) var weightSofar int for _, test := range selector.tests { weightSofar += test.weight diff --git a/stream.go b/stream.go index bb2b2a216ce2..b2d82c364d7d 100644 --- a/stream.go +++ b/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - "math/rand" + rand "math/rand/v2" "strconv" "sync" "time" @@ -710,7 +710,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if max := float64(rp.MaxBackoff); cur > max { cur = max } - dur = time.Duration(rand.Int63n(int64(cur))) + dur = time.Duration(rand.Int64N(int64(cur))) cs.numRetriesSincePushback++ } diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index fab8097e41b7..0f9cb52b7e8f 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -28,7 +28,7 @@ package googledirectpath import ( "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net/url" "sync" "time" diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go index 4ccff08b51ec..c9d496ce09b9 100644 --- a/xds/internal/balancer/outlierdetection/balancer.go +++ 
b/xds/internal/balancer/outlierdetection/balancer.go @@ -25,7 +25,7 @@ import ( "encoding/json" "fmt" "math" - "math/rand" + rand "math/rand/v2" "strings" "sync" "sync/atomic" @@ -824,7 +824,7 @@ func (b *outlierDetectionBalancer) successRateAlgorithm() { requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000) if successRate < requiredSuccessRate { channelz.Infof(logger, b.channelzParent, "SuccessRate algorithm detected outlier: %s. Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate) - if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + if uint32(rand.Int32N(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } } @@ -851,7 +851,7 @@ func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { channelz.Infof(logger, b.channelzParent, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage) - if uint32(rand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + if uint32(rand.Int32N(100)) < ejectionCfg.EnforcementPercentage { b.ejectAddress(addrInfo) } } diff --git a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go index 6765d827a432..44503b2cd9e8 100644 --- a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go +++ b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go @@ -23,7 +23,7 @@ import ( "errors" "fmt" "math" - "math/rand" + rand "math/rand/v2" "net" "slices" "testing" diff --git a/xds/internal/httpfilter/fault/fault.go b/xds/internal/httpfilter/fault/fault.go index 5a82490598a3..0ffa9c827279 100644 --- a/xds/internal/httpfilter/fault/fault.go +++ b/xds/internal/httpfilter/fault/fault.go @@ -24,7 +24,7 @@ import ( "errors" "fmt" "io" - "math/rand" + rand "math/rand/v2" "strconv" "sync/atomic" "time" @@ -162,7 +162,7 @@ func (i *interceptor) NewStream(ctx context.Context, _ iresolver.RPCInfo, done f } // For overriding in tests -var randIntn = rand.Intn +var randIntn = rand.IntN var newTimer = time.NewTimer func injectDelay(ctx context.Context, delayCfg *cpb.FaultDelay) error { diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index bec9f4c2abbe..f9f6a274be70 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -26,7 +26,7 @@ import ( "context" "fmt" "io" - "math/rand" + rand "math/rand/v2" "net" "reflect" "testing" @@ -471,7 +471,7 @@ func (s) TestFaultInjection_Unary(t *testing.T) { for tcNum, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - defer func() { randIntn = rand.Intn; newTimer = time.NewTimer }() + defer func() { randIntn = rand.IntN; newTimer = time.NewTimer }() var intnCalls []int var newTimerCalls []time.Duration randOut := 0 diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index 36776f3debdf..7df75465ac6d 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -23,7 +23,7 @@ import ( "encoding/json" "fmt" "math/bits" - "math/rand" + rand "math/rand/v2" "strings" "sync/atomic" "time" diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index de339a7c9b69..1ba6c001d93d 100644 
--- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -22,7 +22,7 @@ package resolver import ( "context" "fmt" - "math/rand" + rand "math/rand/v2" "sync/atomic" "google.golang.org/grpc/internal" diff --git a/xds/internal/xdsclient/xdsresource/matcher.go b/xds/internal/xdsclient/xdsresource/matcher.go index 796e9e3008de..798f61884923 100644 --- a/xds/internal/xdsclient/xdsresource/matcher.go +++ b/xds/internal/xdsclient/xdsresource/matcher.go @@ -19,7 +19,7 @@ package xdsresource import ( "fmt" - "math/rand" + rand "math/rand/v2" "strings" "google.golang.org/grpc/internal/grpcutil" @@ -142,11 +142,11 @@ func newFractionMatcher(fraction uint32) *fractionMatcher { return &fractionMatcher{fraction: int64(fraction)} } -// RandInt63n overwrites rand for control in tests. -var RandInt63n = rand.Int63n +// RandInt64n overwrites rand for control in tests. +var RandInt64n = rand.Int64N func (fm *fractionMatcher) match() bool { - t := RandInt63n(1000000) + t := RandInt64n(1000000) return t <= fm.fraction } diff --git a/xds/internal/xdsclient/xdsresource/matcher_test.go b/xds/internal/xdsclient/xdsresource/matcher_test.go index 5d694c741578..de7e3e9be49f 100644 --- a/xds/internal/xdsclient/xdsresource/matcher_test.go +++ b/xds/internal/xdsclient/xdsresource/matcher_test.go @@ -19,7 +19,7 @@ package xdsresource import ( "context" - "math/rand" + rand "math/rand/v2" "testing" "github.com/google/go-cmp/cmp" @@ -119,11 +119,11 @@ func (s) TestFractionMatcherMatch(t *testing.T) { const fraction = 500000 fm := newFractionMatcher(fraction) defer func() { - RandInt63n = rand.Int63n + RandInt64n = rand.Int64N }() // rand > fraction, should return false. - RandInt63n = func(int64) int64 { + RandInt64n = func(int64) int64 { return fraction + 1 } if matched := fm.match(); matched { @@ -131,7 +131,7 @@ func (s) TestFractionMatcherMatch(t *testing.T) { } // rand == fraction, should return true. - RandInt63n = func(int64) int64 { + RandInt64n = func(int64) int64 { return fraction } if matched := fm.match(); !matched { @@ -139,7 +139,7 @@ func (s) TestFractionMatcherMatch(t *testing.T) { } // rand < fraction, should return true. - RandInt63n = func(int64) int64 { + RandInt64n = func(int64) int64 { return fraction - 1 } if matched := fm.match(); !matched { From 2e3f5470495ebc16cb56d4e6cc490d4112ab998f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 29 Oct 2024 13:00:10 -0700 Subject: [PATCH 38/57] ringhash: fix a couple of flakes in e2e style tests (#7784) --- .../ringhash/e2e/ringhash_balancer_test.go | 101 ++++++++---------- 1 file changed, 44 insertions(+), 57 deletions(-) diff --git a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go index 44503b2cd9e8..94efa99dcdf0 100644 --- a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go +++ b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go @@ -220,28 +220,29 @@ func checkRPCSendOK(ctx context.Context, t *testing.T, client testgrpc.TestServi return backendCount } -// makeNonExistentBackends returns a slice of strings with num listeners, each -// of which is closed immediately. Useful to simulate servers that are -// unreachable. -func makeNonExistentBackends(t *testing.T, num int) []string { +// makeUnreachableBackends returns a slice of addresses of backends that close +// connections as soon as they are established. Useful to simulate servers that +// are unreachable. 
+func makeUnreachableBackends(t *testing.T, num int) []string { t.Helper() - closedListeners := make([]net.Listener, 0, num) + addrs := make([]string, 0, num) for i := 0; i < num; i++ { - lis, err := testutils.LocalTCPListener() + l, err := testutils.LocalTCPListener() if err != nil { t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } - closedListeners = append(closedListeners, lis) - } - - // Stop the servers that we want to be unreachable and collect their - // addresses. We don't close them in the loop above to make sure ports are - // not reused across them. - addrs := make([]string, 0, num) - for _, lis := range closedListeners { + lis := testutils.NewRestartableListener(l) addrs = append(addrs, lis.Addr().String()) - lis.Close() + + // It is enough to fail the first connection attempt to put the subchannel + // in TRANSIENT_FAILURE. + go func() { lis.Accept() }() + + // We don't close these listeners here, to make sure ports are + // not reused across them, and across tests. + lis.Stop() + t.Cleanup(func() { lis.Close() }) } return addrs } @@ -304,7 +305,7 @@ func (s) TestRingHash_AggregateClusterFallBackFromRingHashAtStartup(t *testing.T Localities: []e2e.LocalityOptions{{ Name: "locality0", Weight: 1, - Backends: backendOptions(t, makeNonExistentBackends(t, 2)), + Backends: backendOptions(t, makeUnreachableBackends(t, 2)), }}, }) ep2 := e2e.EndpointResourceWithOptions(e2e.EndpointOptions{ @@ -403,7 +404,7 @@ func (s) TestRingHash_AggregateClusterFallBackFromRingHashToLogicalDnsAtStartup( Localities: []e2e.LocalityOptions{{ Name: "locality0", Weight: 1, - Backends: backendOptions(t, makeNonExistentBackends(t, 1)), + Backends: backendOptions(t, makeUnreachableBackends(t, 1)), Priority: 0, }}, }) @@ -480,7 +481,7 @@ func (s) TestRingHash_AggregateClusterFallBackFromRingHashToLogicalDnsAtStartupN Localities: []e2e.LocalityOptions{{ Name: "locality0", Weight: 1, - Backends: backendOptions(t, makeNonExistentBackends(t, 1)), + Backends: backendOptions(t, makeUnreachableBackends(t, 1)), Priority: 0, }}, }) @@ -1369,17 +1370,11 @@ func (s) TestRingHash_IdleToReady(t *testing.T) { t.Fatalf("Failed to create client: %s", err) } defer conn.Close() - client := testgrpc.NewTestServiceClient(conn) - - if got, want := conn.GetState(), connectivity.Idle; got != want { - t.Errorf("conn.GetState(): got %v, want %v", got, want) - } + testutils.AwaitState(ctx, t, conn, connectivity.Idle) + client := testgrpc.NewTestServiceClient(conn) checkRPCSendOK(ctx, t, client, 1) - - if got, want := conn.GetState(), connectivity.Ready; got != want { - t.Errorf("conn.GetState(): got %v, want %v", got, want) - } + testutils.AwaitState(ctx, t, conn, connectivity.Ready) } // Test that the channel will transition to READY once it starts @@ -1395,10 +1390,10 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicks(t *testing.T) { }) defer backend.Stop() - nonExistentServerAddr := makeNonExistentBackends(t, 1)[0] + unReachableServerAddr := makeUnreachableBackends(t, 1)[0] const clusterName = "cluster" - endpoints := endpointResource(t, clusterName, []string{backend.Address, nonExistentServerAddr}) + endpoints := endpointResource(t, clusterName, []string{backend.Address, unReachableServerAddr}) cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{ ClusterName: clusterName, ServiceName: clusterName, @@ -1432,7 +1427,7 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicks(t *testing.T) { rpcCtx, rpcCancel := context.WithCancel(ctx) go func() { - rpcCtx = metadata.NewOutgoingContext(rpcCtx, 
metadata.Pairs("address_hash", nonExistentServerAddr+"_0")) + rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", unReachableServerAddr+"_0")) _, err := client.EmptyCall(rpcCtx, &testpb.Empty{}) if status.Code(err) != codes.Canceled { t.Errorf("Expected RPC to be canceled, got error: %v", err) @@ -1459,10 +1454,10 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicks(t *testing.T) { // will move on to the next ring hash entry. func (s) TestRingHash_TransientFailureCheckNextOne(t *testing.T) { backends := startTestServiceBackends(t, 1) - nonExistentBackends := makeNonExistentBackends(t, 1) + unReachableBackends := makeUnreachableBackends(t, 1) const clusterName = "cluster" - endpoints := endpointResource(t, clusterName, append(nonExistentBackends, backends...)) + endpoints := endpointResource(t, clusterName, append(unReachableBackends, backends...)) cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{ ClusterName: clusterName, ServiceName: clusterName, @@ -1489,7 +1484,7 @@ func (s) TestRingHash_TransientFailureCheckNextOne(t *testing.T) { // Note each type of RPC contains a header value that will always be hashed // the value that was used to place the non-existent endpoint on the ring, // but it still gets routed to the backend that is up. - ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("address_hash", nonExistentBackends[0]+"_0")) + ctx = metadata.NewOutgoingContext(ctx, metadata.Pairs("address_hash", unReachableBackends[0]+"_0")) reqPerBackend := checkRPCSendOK(ctx, t, client, 1) var got string for got = range reqPerBackend { @@ -1530,24 +1525,18 @@ func (s) TestRingHash_ReattemptWhenGoingFromTransientFailureToIdle(t *testing.T) t.Fatalf("Failed to create client: %s", err) } defer conn.Close() - client := testgrpc.NewTestServiceClient(conn) - - if got, want := conn.GetState(), connectivity.Idle; got != want { - t.Errorf("conn.GetState(): got %v, want %v", got, want) - } + testutils.AwaitState(ctx, t, conn, connectivity.Idle) // There are no endpoints in EDS. RPCs should fail and the channel should // transition to transient failure. 
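The flake fixes in this commit replace point-in-time conn.GetState() assertions with testutils.AwaitState, which blocks until the channel reaches the wanted state. A rough sketch of that kind of helper, built only from the public ClientConn methods GetState and WaitForStateChange (the real helper lives in grpc-go's internal/testutils and may differ in detail; names here are illustrative):

package example_test

import (
	"context"
	"testing"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// awaitState blocks until conn reports wantState, or fails the test when ctx
// expires. This mirrors the polling pattern AwaitState is assumed to use.
func awaitState(ctx context.Context, t *testing.T, conn *grpc.ClientConn, wantState connectivity.State) {
	t.Helper()
	for state := conn.GetState(); state != wantState; state = conn.GetState() {
		// WaitForStateChange returns false once ctx is done.
		if !conn.WaitForStateChange(ctx, state) {
			t.Fatalf("Timed out waiting for state %v; last observed state: %v", wantState, state)
		}
	}
}

Waiting for the state instead of sampling it once avoids the race where the channel has not yet (or has already) moved past the asserted state by the time a single GetState call runs.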
+ client := testgrpc.NewTestServiceClient(conn) if _, err = client.EmptyCall(ctx, &testpb.Empty{}); err == nil { t.Errorf("rpc EmptyCall() succeeded, want error") } - if got, want := conn.GetState(), connectivity.TransientFailure; got != want { - t.Errorf("conn.GetState(): got %v, want %v", got, want) - } - - backends := startTestServiceBackends(t, 1) + testutils.AwaitState(ctx, t, conn, connectivity.TransientFailure) t.Log("Updating EDS with a new backend endpoint.") + backends := startTestServiceBackends(t, 1) endpoints = e2e.EndpointResourceWithOptions(e2e.EndpointOptions{ ClusterName: clusterName, Localities: []e2e.LocalityOptions{{ @@ -1563,9 +1552,7 @@ func (s) TestRingHash_ReattemptWhenGoingFromTransientFailureToIdle(t *testing.T) if _, err = client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Errorf("rpc EmptyCall() failed: %v", err) } - if got, want := conn.GetState(), connectivity.Ready; got != want { - t.Errorf("conn.GetState(): got %v, want %v", got, want) - } + testutils.AwaitState(ctx, t, conn, connectivity.Ready) } // Tests that when all backends are down and then up, we may pick a TF backend @@ -1596,11 +1583,11 @@ func (s) TestRingHash_TransientFailureSkipToAvailableReady(t *testing.T) { }) defer restartableServer2.Stop() - nonExistentBackends := makeNonExistentBackends(t, 2) + unReachableBackends := makeUnreachableBackends(t, 2) const clusterName = "cluster" backends := []string{restartableServer1.Address, restartableServer2.Address} - backends = append(backends, nonExistentBackends...) + backends = append(backends, unReachableBackends...) endpoints := endpointResource(t, clusterName, backends) cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{ ClusterName: clusterName, @@ -1862,11 +1849,11 @@ func (s) TestRingHash_SwitchToLowerPriorityAndThenBack(t *testing.T) { // so for only one subchannel at a time. 
func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *testing.T) { backends := startTestServiceBackends(t, 1) - nonExistentBackends := makeNonExistentBackends(t, 3) + unReachableBackends := makeUnreachableBackends(t, 3) const clusterName = "cluster" - endpoints := endpointResource(t, clusterName, append(nonExistentBackends, backends...)) + endpoints := endpointResource(t, clusterName, append(unReachableBackends, backends...)) cluster := e2e.ClusterResourceWithOptions(e2e.ClusterOptions{ ClusterName: clusterName, ServiceName: clusterName, @@ -1897,15 +1884,15 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes defer conn.Close() client := testgrpc.NewTestServiceClient(conn) - holdNonExistent0 := dialer.Hold(nonExistentBackends[0]) - holdNonExistent1 := dialer.Hold(nonExistentBackends[1]) - holdNonExistent2 := dialer.Hold(nonExistentBackends[2]) + holdNonExistent0 := dialer.Hold(unReachableBackends[0]) + holdNonExistent1 := dialer.Hold(unReachableBackends[1]) + holdNonExistent2 := dialer.Hold(unReachableBackends[2]) holdGood := dialer.Hold(backends[0]) rpcCtx, rpcCancel := context.WithCancel(ctx) errCh := make(chan error, 1) go func() { - rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", nonExistentBackends[0]+"_0")) + rpcCtx = metadata.NewOutgoingContext(rpcCtx, metadata.Pairs("address_hash", unReachableBackends[0]+"_0")) _, err := client.EmptyCall(rpcCtx, &testpb.Empty{}) if status.Code(err) == codes.Canceled { errCh <- nil @@ -1939,7 +1926,7 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes // Allow the connection attempt to the first address to resume and wait for // the attempt for the second address. No other connection attempts should // be started yet. - holdNonExistent0Again := dialer.Hold(nonExistentBackends[0]) + holdNonExistent0Again := dialer.Hold(unReachableBackends[0]) holdNonExistent0.Resume() if !holdNonExistent1.Wait(ctx) { t.Fatalf("Timeout waiting for connection attempt to backend 1") @@ -1957,7 +1944,7 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes // Allow the connection attempt to the second address to resume and wait for // the attempt for the third address. No other connection attempts should // be started yet. - holdNonExistent1Again := dialer.Hold(nonExistentBackends[1]) + holdNonExistent1Again := dialer.Hold(unReachableBackends[1]) holdNonExistent1.Resume() if !holdNonExistent2.Wait(ctx) { t.Fatalf("Timeout waiting for connection attempt to backend 2") @@ -1975,7 +1962,7 @@ func (s) TestRingHash_ContinuesConnectingWithoutPicksOneSubchannelAtATime(t *tes // Allow the connection attempt to the third address to resume and wait // for the attempt for the final address. No other connection attempts // should be started yet. 
- holdNonExistent2Again := dialer.Hold(nonExistentBackends[2]) + holdNonExistent2Again := dialer.Hold(unReachableBackends[2]) holdNonExistent2.Resume() if !holdGood.Wait(ctx) { t.Fatalf("Timeout waiting for connection attempt to good backend") From d66fc3a1efa1dfb33dfedf9760528f1ac2b923b6 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Tue, 29 Oct 2024 16:59:48 -0400 Subject: [PATCH 39/57] balancer/endpointsharding: Call ExitIdle() on child if child reports IDLE (#7782) --- balancer/endpointsharding/endpointsharding.go | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/balancer/endpointsharding/endpointsharding.go b/balancer/endpointsharding/endpointsharding.go index 9238d3278204..b5b92143194b 100644 --- a/balancer/endpointsharding/endpointsharding.go +++ b/balancer/endpointsharding/endpointsharding.go @@ -66,7 +66,9 @@ type endpointSharding struct { cc balancer.ClientConn bOpts balancer.BuildOptions + childMu sync.Mutex // syncs balancer.Balancer calls into children children atomic.Pointer[resolver.EndpointMap] + closed bool // inhibitChildUpdates is set during UpdateClientConnState/ResolverError // calls (calls to children will each produce an update, only want one @@ -83,6 +85,9 @@ type endpointSharding struct { // addresses it will ignore that endpoint. Otherwise, returns first error found // from a child, but fully processes the new update. func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error { + es.childMu.Lock() + defer es.childMu.Unlock() + es.inhibitChildUpdates.Store(true) defer func() { es.inhibitChildUpdates.Store(false) @@ -145,6 +150,8 @@ func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState // children and sends a single synchronous update of the childStates at the end // of the ResolverError operation. func (es *endpointSharding) ResolverError(err error) { + es.childMu.Lock() + defer es.childMu.Unlock() es.inhibitChildUpdates.Store(true) defer func() { es.inhibitChildUpdates.Store(false) @@ -162,11 +169,14 @@ func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubCon } func (es *endpointSharding) Close() { + es.childMu.Lock() + defer es.childMu.Unlock() children := es.children.Load() for _, child := range children.Values() { bal := child.(balancer.Balancer) bal.Close() } + es.closed = true } // updateState updates this component's state. It sends the aggregated state, @@ -274,6 +284,17 @@ func (bw *balancerWrapper) UpdateState(state balancer.State) { bw.es.mu.Lock() bw.childState.State = state bw.es.mu.Unlock() + // When a child balancer says it's IDLE, ping it to exit idle and reconnect. + // TODO: In the future, perhaps make this a knob in configuration. 
+ if ei, ok := bw.Balancer.(balancer.ExitIdler); state.ConnectivityState == connectivity.Idle && ok { + go func() { + bw.es.childMu.Lock() + if !bw.es.closed { + ei.ExitIdle() + } + bw.es.childMu.Unlock() + }() + } bw.es.updateState() } From ef0f6177dd5b69452ace639237500e746d7ccb45 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Fri, 1 Nov 2024 08:49:58 -0700 Subject: [PATCH 40/57] xdsclient: start using the newly added transport and channel functionalities (#7773) --- xds/internal/xdsclient/authority.go | 919 +++++++++--------- xds/internal/xdsclient/authority_test.go | 295 ------ xds/internal/xdsclient/channel.go | 37 + xds/internal/xdsclient/channel_test.go | 6 +- xds/internal/xdsclient/client_new.go | 61 +- xds/internal/xdsclient/client_refcounted.go | 8 +- xds/internal/xdsclient/clientimpl.go | 375 ++++++- .../xdsclient/clientimpl_authority.go | 146 --- xds/internal/xdsclient/clientimpl_dump.go | 5 +- .../xdsclient/clientimpl_loadreport.go | 18 +- xds/internal/xdsclient/clientimpl_watchers.go | 69 +- xds/internal/xdsclient/internal/internal.go | 1 - xds/internal/xdsclient/logging.go | 4 - .../tests/ads_stream_flow_control_test.go | 2 +- .../xdsclient/tests/ads_stream_watch_test.go | 206 ++++ .../xdsclient/tests/authority_test.go | 54 +- xds/internal/xdsclient/tests/helpers_test.go | 8 +- .../xdsclient/transport/internal/internal.go | 25 - .../xdsclient/transport/loadreport.go | 259 ----- .../xdsclient/transport/lrs/lrs_stream.go | 11 +- xds/internal/xdsclient/transport/transport.go | 702 ------------- .../xdsclient/transport/transport_new_test.go | 100 -- .../xdsclient/transport/transport_test.go | 98 -- 23 files changed, 1173 insertions(+), 2236 deletions(-) delete mode 100644 xds/internal/xdsclient/authority_test.go delete mode 100644 xds/internal/xdsclient/clientimpl_authority.go create mode 100644 xds/internal/xdsclient/tests/ads_stream_watch_test.go delete mode 100644 xds/internal/xdsclient/transport/internal/internal.go delete mode 100644 xds/internal/xdsclient/transport/loadreport.go delete mode 100644 xds/internal/xdsclient/transport/transport.go delete mode 100644 xds/internal/xdsclient/transport/transport_new_test.go delete mode 100644 xds/internal/xdsclient/transport/transport_test.go diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index e1fce2340e67..bd1662e8bca7 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -19,18 +19,14 @@ package xdsclient import ( "context" - "errors" "fmt" - "strings" - "sync" "sync/atomic" - "time" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog" + igrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/timestamppb" @@ -39,44 +35,60 @@ import ( v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) -type watchState int - -const ( - watchStateStarted watchState = iota // Watch started, request not yet set. - watchStateRequested // Request sent for resource being watched. - watchStateReceived // Response received for resource being watched. - watchStateTimeout // Watch timer expired, no response. 
- watchStateCanceled // Watch cancelled. -) - type resourceState struct { - watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource - cache xdsresource.ResourceData // Most recent ACKed update for this resource - md xdsresource.UpdateMetadata // Metadata for the most recent update - deletionIgnored bool // True if resource deletion was ignored for a prior update - - // Common watch state for all watchers of this resource. - wTimer *time.Timer // Expiry timer - wState watchState // State of the watch + watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource. + cache xdsresource.ResourceData // Most recent ACKed update for this resource. + md xdsresource.UpdateMetadata // Metadata for the most recent update. + deletionIgnored bool // True, if resource deletion was ignored for a prior update. + xdsChannelConfigs []*xdsChannelWithConfig // List of xdsChannels where this resource is subscribed. +} + +// xdsChannelForADS is used to acquire a reference to an xdsChannel. This +// functionality is provided by the xdsClient. +// +// The arguments to the function are as follows: +// - the server config for the xdsChannel +// - the calling authority on which a set of callbacks are invoked by the +// xdsChannel on ADS stream events +// +// Returns a reference to the xdsChannel and a function to release the same. A +// non-nil error is returned if the channel creation fails and the first two +// return values are meaningless in this case. +type xdsChannelForADS func(*bootstrap.ServerConfig, *authority) (*xdsChannel, func(), error) + +// xdsChannelWithConfig is a struct that holds an xdsChannel and its associated +// ServerConfig, along with a cleanup function to release the xdsChannel. +type xdsChannelWithConfig struct { + xc *xdsChannel + sc *bootstrap.ServerConfig + cleanup func() } -// authority wraps all state associated with a single management server. It -// contains the transport used to communicate with the management server and a -// cache of resource state for resources requested from the management server. +// authority provides the functionality required to communicate with a +// management server corresponding to an authority name specified in the +// bootstrap configuration. // -// Bootstrap configuration could contain multiple entries in the authorities map -// that share the same server config (server address and credentials to use). We -// share the same authority instance amongst these entries, and the reference -// counting is taken care of by the `clientImpl` type. +// It holds references to one or more xdsChannels, one for each server +// configuration in the bootstrap, to allow fallback from a primary management +// server to a secondary management server. Authorities that contain similar +// server configuration entries will end up sharing the xdsChannel for that +// server configuration. The xdsChannels are owned and managed by the xdsClient. +// +// It also contains a cache of resource state for resources requested from +// management server(s). This cache contains the list of registered watchers and +// the most recent resource configuration received from the management server. 
type authority struct { - serverCfg *bootstrap.ServerConfig // Server config for this authority - bootstrapCfg *bootstrap.Config // Full bootstrap configuration - refCount int // Reference count of watches referring to this authority - serializer *grpcsync.CallbackSerializer // Callback serializer for invoking watch callbacks - resourceTypeGetter func(string) xdsresource.Type // ResourceType registry lookup - transport *transport.Transport // Underlying xDS transport to the management server - watchExpiryTimeout time.Duration // Resource watch expiry timeout - logger *grpclog.PrefixLogger + // The following fields are initialized at creation time and are read-only + // afterwards, and therefore don't need to be protected with a mutex. + name string // Name of the authority from bootstrap configuration. + watcherCallbackSerializer *grpcsync.CallbackSerializer // Serializer to run watcher callbacks, owned by the xDS client implementation. + getChannelForADS xdsChannelForADS // Function to get an xdsChannel for ADS, provided by the xDS client implementation. + xdsClientSerializer *grpcsync.CallbackSerializer // Serializer to run call ins from the xDS client, owned by this authority. + xdsClientSerializerClose func() // Function to close the above serializer. + logger *igrpclog.PrefixLogger // Logger for this authority. + + // The below defined fields must only be accessed in the context of the + // serializer callback, owned by this authority. // A two level map containing the state of all the resources being watched. // @@ -86,89 +98,133 @@ type authority struct { // // The second level map key is the resource name, with the value being the // actual state of the resource. - resourcesMu sync.Mutex - resources map[xdsresource.Type]map[string]*resourceState - closed bool + resources map[xdsresource.Type]map[string]*resourceState + + // An ordered list of xdsChannels corresponding to the list of server + // configurations specified for this authority in the bootstrap. The + // ordering specifies the order in which these channels are preferred for + // fallback. + xdsChannelConfigs []*xdsChannelWithConfig + + // The current active xdsChannel. Here, active does not mean that the + // channel has a working connection to the server. It simply points to the + // channel that we are trying to work with, based on fallback logic. + activeXDSChannel *xdsChannelWithConfig } -// authorityArgs is a convenience struct to wrap arguments required to create a -// new authority. All fields here correspond directly to appropriate fields -// stored in the authority struct. -type authorityArgs struct { - // The reason for passing server config and bootstrap config separately - // (although the former is part of the latter) is because authorities in the - // bootstrap config might contain an empty server config, and in this case, - // the top-level server config is to be used. - serverCfg *bootstrap.ServerConfig - bootstrapCfg *bootstrap.Config - serializer *grpcsync.CallbackSerializer - resourceTypeGetter func(string) xdsresource.Type - watchExpiryTimeout time.Duration - backoff func(int) time.Duration // Backoff for ADS and LRS stream failures. - logger *grpclog.PrefixLogger +// authorityBuildOptions wraps arguments required to create a new authority. 
+type authorityBuildOptions struct { + serverConfigs bootstrap.ServerConfigs // Server configs for the authority + name string // Name of the authority + serializer *grpcsync.CallbackSerializer // Callback serializer for invoking watch callbacks + getChannelForADS xdsChannelForADS // Function to acquire a reference to an xdsChannel + logPrefix string // Prefix for logging } -func newAuthority(args authorityArgs) (*authority, error) { +// newAuthority creates a new authority instance with the provided +// configuration. The authority is responsible for managing the state of +// resources requested from the management server, as well as acquiring and +// releasing references to channels used to communicate with the management +// server. +// +// Note that no channels to management servers are created at this time. Instead +// a channel to the first server configuration is created when the first watch +// is registered, and more channels are created as needed by the fallback logic. +func newAuthority(args authorityBuildOptions) *authority { + ctx, cancel := context.WithCancel(context.Background()) + l := grpclog.Component("xds") + logPrefix := args.logPrefix + fmt.Sprintf("[authority %q] ", args.name) ret := &authority{ - serverCfg: args.serverCfg, - bootstrapCfg: args.bootstrapCfg, - serializer: args.serializer, - resourceTypeGetter: args.resourceTypeGetter, - watchExpiryTimeout: args.watchExpiryTimeout, - logger: args.logger, - resources: make(map[xdsresource.Type]map[string]*resourceState), + name: args.name, + watcherCallbackSerializer: args.serializer, + getChannelForADS: args.getChannelForADS, + xdsClientSerializer: grpcsync.NewCallbackSerializer(ctx), + xdsClientSerializerClose: cancel, + logger: igrpclog.NewPrefixLogger(l, logPrefix), + resources: make(map[xdsresource.Type]map[string]*resourceState), + } + + // Create an ordered list of xdsChannels with their server configs. The + // actual channel to the first server configuration is created when the + // first watch is registered, and channels to other server configurations + // are created as needed to support fallback. + for _, sc := range args.serverConfigs { + ret.xdsChannelConfigs = append(ret.xdsChannelConfigs, &xdsChannelWithConfig{sc: sc}) } + return ret +} - tr, err := transport.New(transport.Options{ - ServerCfg: args.serverCfg, - OnRecvHandler: ret.handleResourceUpdate, - OnErrorHandler: ret.newConnectionError, - OnSendHandler: ret.transportOnSendHandler, - Backoff: args.backoff, - Logger: args.logger, - NodeProto: args.bootstrapCfg.Node(), +// adsStreamFailure is called to notify the authority about an ADS stream +// failure on an xdsChannel to the management server identified by the provided +// server config. The error is forwarded to all the resource watchers. +// +// This method is called by the xDS client implementation (on all interested +// authorities) when a stream error is reported by an xdsChannel. +// +// Errors of type xdsresource.ErrTypeStreamFailedAfterRecv are ignored. +func (a *authority) adsStreamFailure(serverConfig *bootstrap.ServerConfig, err error) { + a.xdsClientSerializer.TrySchedule(func(context.Context) { + a.handleADSStreamFailure(serverConfig, err) }) - if err != nil { - return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) - } - ret.transport = tr - return ret, nil } -// transportOnSendHandler is called by the underlying transport when it sends a -// resource request successfully. Timers are activated for resources waiting for -// a response. 
-func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { - rType := a.resourceTypeGetter(u.URL) - // Resource type not found is not expected under normal circumstances, since - // the resource type url passed to the transport is determined by the authority. - if rType == nil { - a.logger.Warningf("Unknown resource type url: %s.", u.URL) - return +// Handles ADS stream failure by invoking watch callbacks and triggering +// fallback if the associated conditions are met. +// +// Only executed in the context of a serializer callback. +func (a *authority) handleADSStreamFailure(serverConfig *bootstrap.ServerConfig, err error) { + if a.logger.V(2) { + a.logger.Infof("Connection to server %s failed with error: %v", serverConfig, err) } - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - a.startWatchTimersLocked(rType, u.ResourceNames) -} -func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate, onDone func()) error { - rType := a.resourceTypeGetter(resourceUpdate.URL) - if rType == nil { - return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) + // We do not consider it an error if the ADS stream was closed after having + // received a response on the stream. This is because there are legitimate + // reasons why the server may need to close the stream during normal + // operations, such as needing to rebalance load or the underlying + // connection hitting its max connection age limit. See gRFC A57 for more + // details. + if xdsresource.ErrType(err) == xdsresource.ErrTypeStreamFailedAfterRecv { + a.logger.Warningf("Watchers not notified since ADS stream failed after having received at least one response: %v", err) + return } - opts := &xdsresource.DecodeOptions{ - BootstrapConfig: a.bootstrapCfg, - ServerConfig: a.serverCfg, + // Propagate the connection error from the transport layer to all watchers. + for _, rType := range a.resources { + for _, state := range rType { + for watcher := range state.watchers { + watcher := watcher + a.watcherCallbackSerializer.TrySchedule(func(context.Context) { + watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), func() {}) + }) + } + } } - updates, md, err := decodeAllResources(opts, rType, resourceUpdate) - a.updateResourceStateAndScheduleCallbacks(rType, updates, md, onDone) - return err + + // TODO(easwars-fallback): Trigger fallback here if conditions for fallback + // are met. +} + +// adsResourceUpdate is called to notify the authority about a resource update +// received on the ADS stream. +// +// This method is called by the xDS client implementation (on all interested +// authorities) when a stream error is reported by an xdsChannel. +func (a *authority) adsResourceUpdate(serverConfig *bootstrap.ServerConfig, rType xdsresource.Type, updates map[string]ads.DataAndErrTuple, md xdsresource.UpdateMetadata, onDone func()) { + a.xdsClientSerializer.TrySchedule(func(context.Context) { + a.handleADSResourceUpdate(serverConfig, rType, updates, md, onDone) + }) } -func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata, onDone func()) { - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() +// handleADSResourceUpdate processes an update from the xDS client, updating the +// resource cache and notifying any registered watchers of the update. 
+// +// Once the update has been processed by all watchers, the authority is expected +// to invoke the onDone callback. +// +// Only executed in the context of a serializer callback. +func (a *authority) handleADSResourceUpdate(serverConfig *bootstrap.ServerConfig, rType xdsresource.Type, updates map[string]ads.DataAndErrTuple, md xdsresource.UpdateMetadata, onDone func()) { + // TODO(easwars-fallback): Trigger reverting to a higher priority server if + // the update is from one. // We build a list of callback funcs to invoke, and invoke them at the end // of this method instead of inline (when handling the update for a @@ -178,8 +234,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // callbacks are invoked, and the watchers have processed the update. watcherCnt := new(atomic.Int64) done := func() { - watcherCnt.Add(-1) - if watcherCnt.Load() == 0 { + if watcherCnt.Add(-1) == 0 { onDone() } } @@ -190,93 +245,67 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // this update, invoke onDone explicitly to unblock the next read on // the ADS stream. onDone() + return } for _, f := range funcsToSchedule { - a.serializer.ScheduleOr(f, onDone) + a.watcherCallbackSerializer.ScheduleOr(f, onDone) } }() resourceStates := a.resources[rType] for name, uErr := range updates { - if state, ok := resourceStates[name]; ok { - // Cancel the expiry timer associated with the resource once a - // response is received, irrespective of whether the update is a - // good one or not. - // - // We check for watch states `started` and `requested` here to - // accommodate for a race which can happen in the following - // scenario: - // - When a watch is registered, it is possible that the ADS stream - // is not yet created. In this case, the request for the resource - // is not sent out immediately. An entry in the `resourceStates` - // map is created with a watch state of `started`. - // - Once the stream is created, it is possible that the management - // server might respond with the requested resource before we send - // out request for the same. If we don't check for `started` here, - // and move the state to `received`, we will end up starting the - // timer when the request gets sent out. And since the management - // server already sent us the resource, there is a good chance - // that it will not send it again. This would eventually lead to - // the timer firing, even though we have the resource in the - // cache. - if state.wState == watchStateStarted || state.wState == watchStateRequested { - // It is OK to ignore the return value from Stop() here because - // if the timer has already fired, it means that the timer watch - // expiry callback is blocked on the same lock that we currently - // hold. Since we move the state to `received` here, the timer - // callback will be a no-op. - if state.wTimer != nil { - state.wTimer.Stop() - } - state.wState = watchStateReceived - } + state, ok := resourceStates[name] + if !ok { + continue + } - if uErr.err != nil { - // On error, keep previous version of the resource. But update - // status and error. - state.md.ErrState = md.ErrState - state.md.Status = md.Status - for watcher := range state.watchers { - watcher := watcher - err := uErr.err - watcherCnt.Add(1) - funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, done) }) - } - continue + // On error, keep previous version of the resource. But update status + // and error. 
+ if uErr.Err != nil { + state.md.ErrState = md.ErrState + state.md.Status = md.Status + for watcher := range state.watchers { + watcher := watcher + err := uErr.Err + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, done) }) } + continue + } - if state.deletionIgnored { - state.deletionIgnored = false - a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeName()) - } - // Notify watchers if any of these conditions are met: - // - this is the first update for this resource - // - this update is different from the one currently cached - // - the previous update for this resource was NACKed, but the update - // before that was the same as this update. - if state.cache == nil || !state.cache.RawEqual(uErr.resource) || state.md.ErrState != nil { - for watcher := range state.watchers { - watcher := watcher - resource := uErr.resource - watcherCnt.Add(1) - funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, done) }) - } - } - // Sync cache. + if state.deletionIgnored { + state.deletionIgnored = false + a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeName()) + } + // Notify watchers if any of these conditions are met: + // - this is the first update for this resource + // - this update is different from the one currently cached + // - the previous update for this resource was NACKed, but the update + // before that was the same as this update. + if state.cache == nil || !state.cache.RawEqual(uErr.Resource) || state.md.ErrState != nil { + // Update the resource cache. if a.logger.V(2) { a.logger.Infof("Resource type %q with name %q added to cache", rType.TypeName(), name) } - state.cache = uErr.resource - // Set status to ACK, and clear error state. The metadata might be a - // NACK metadata because some other resources in the same response - // are invalid. - state.md = md - state.md.ErrState = nil - state.md.Status = xdsresource.ServiceStatusACKed - if md.ErrState != nil { - state.md.Version = md.ErrState.Version + state.cache = uErr.Resource + + for watcher := range state.watchers { + watcher := watcher + resource := uErr.Resource + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, done) }) } } + + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. + state.md = md + state.md.ErrState = nil + state.md.Status = xdsresource.ServiceStatusACKed + if md.ErrState != nil { + state.md.Version = md.ErrState.Version + } } // If this resource type requires that all resources be present in every @@ -306,336 +335,271 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // resource. So, there is no need to generate another one. continue } - if _, ok := updates[name]; !ok { + if _, ok := updates[name]; ok { + // If the resource was present in the response, move on. + continue + } + if state.md.Status == xdsresource.ServiceStatusNotExist { // The metadata status is set to "ServiceStatusNotExist" if a // previous update deleted this resource, in which case we do not // want to repeatedly call the watch callbacks with a // "resource-not-found" error. 
- if state.md.Status == xdsresource.ServiceStatusNotExist { - continue - } - // Per A53, resource deletions are ignored if the `ignore_resource_deletion` - // server feature is enabled through the bootstrap configuration. If the - // resource deletion is to be ignored, the resource is not removed from - // the cache and the corresponding OnResourceDoesNotExist() callback is - // not invoked on the watchers. - if a.serverCfg.ServerFeaturesIgnoreResourceDeletion() { - if !state.deletionIgnored { - state.deletionIgnored = true - a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) - } - continue - } - // If resource exists in cache, but not in the new update, delete - // the resource from cache, and also send a resource not found error - // to indicate resource removed. Metadata for the resource is still - // maintained, as this is required by CSDS. - state.cache = nil - state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} - for watcher := range state.watchers { - watcher := watcher - watcherCnt.Add(1) - funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(done) }) - } - } - } -} - -type resourceDataErrTuple struct { - resource xdsresource.ResourceData - err error -} - -func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, update transport.ResourceUpdate) (map[string]resourceDataErrTuple, xdsresource.UpdateMetadata, error) { - timestamp := time.Now() - md := xdsresource.UpdateMetadata{ - Version: update.Version, - Timestamp: timestamp, - } - - topLevelErrors := make([]error, 0) // Tracks deserialization errors, where we don't have a resource name. - perResourceErrors := make(map[string]error) // Tracks resource validation errors, where we have a resource name. - ret := make(map[string]resourceDataErrTuple) // Return result, a map from resource name to either resource data or error. - for _, r := range update.Resources { - result, err := rType.Decode(opts, r) - - // Name field of the result is left unpopulated only when resource - // deserialization fails. - name := "" - if result != nil { - name = xdsresource.ParseName(result.Name).String() - } - if err == nil { - ret[name] = resourceDataErrTuple{resource: result.Resource} continue } - if name == "" { - topLevelErrors = append(topLevelErrors, err) + if serverConfig.ServerFeaturesIgnoreResourceDeletion() { + // Per A53, resource deletions are ignored if the + // `ignore_resource_deletion` server feature is enabled through the + // bootstrap configuration. If the resource deletion is to be + // ignored, the resource is not removed from the cache and the + // corresponding OnResourceDoesNotExist() callback is not invoked on + // the watchers. + if !state.deletionIgnored { + state.deletionIgnored = true + a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) + } continue } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. 
- ret[name] = resourceDataErrTuple{err: err} - } - if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { - md.Status = xdsresource.ServiceStatusACKed - return ret, md, nil - } - - md.Status = xdsresource.ServiceStatusNACKed - errRet := combineErrors(rType.TypeName(), topLevelErrors, perResourceErrors) - md.ErrState = &xdsresource.UpdateErrorMetadata{ - Version: update.Version, - Err: errRet, - Timestamp: timestamp, - } - return ret, md, errRet -} - -// startWatchTimersLocked is invoked upon transport.OnSend() callback with resources -// requested on the underlying ADS stream. This satisfies the conditions to start -// watch timers per A57 [https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md#handling-resources-that-do-not-exist] -// -// Caller must hold a.resourcesMu. -func (a *authority) startWatchTimersLocked(rType xdsresource.Type, resourceNames []string) { - resourceStates := a.resources[rType] - for _, resourceName := range resourceNames { - if state, ok := resourceStates[resourceName]; ok { - if state.wState != watchStateStarted { - continue - } - state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { - a.resourcesMu.Lock() - a.handleWatchTimerExpiryLocked(rType, resourceName, state) - a.resourcesMu.Unlock() - }) - state.wState = watchStateRequested + // If we get here, it means that the resource exists in cache, but not + // in the new update. Delete the resource from cache, and send a + // resource not found error to indicate that the resource has been + // removed. Metadata for the resource is still maintained, as this is + // required by CSDS. + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(done) }) } } } -// stopWatchTimersLocked is invoked upon connection errors to stops watch timers -// for resources that have been requested, but not yet responded to by the management -// server. -// -// Caller must hold a.resourcesMu. -func (a *authority) stopWatchTimersLocked() { - for _, rType := range a.resources { - for resourceName, state := range rType { - if state.wState != watchStateRequested { - continue - } - if !state.wTimer.Stop() { - // If the timer has already fired, it means that the timer watch expiry - // callback is blocked on the same lock that we currently hold. Don't change - // the watch state and instead let the watch expiry callback handle it. - a.logger.Warningf("Watch timer for resource %v already fired. Ignoring here.", resourceName) - continue - } - state.wTimer = nil - state.wState = watchStateStarted - } - } +// adsResourceDoesNotExist is called by the xDS client implementation (on all +// interested authorities) to notify the authority that a subscribed resource +// does not exist. +func (a *authority) adsResourceDoesNotExist(rType xdsresource.Type, resourceName string) { + a.xdsClientSerializer.TrySchedule(func(context.Context) { + a.handleADSResourceDoesNotExist(rType, resourceName) + }) } -// newConnectionError is called by the underlying transport when it receives a -// connection error. The error will be forwarded to all the resource watchers. -func (a *authority) newConnectionError(err error) { - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - - a.stopWatchTimersLocked() +// handleADSResourceDoesNotExist is called when a subscribed resource does not +// exist. 
It removes the resource from the cache, updates the metadata status +// to ServiceStatusNotExist, and notifies all watchers that the resource does +// not exist. +func (a *authority) handleADSResourceDoesNotExist(rType xdsresource.Type, resourceName string) { + if a.logger.V(2) { + a.logger.Infof("Watch for resource %q of type %s timed out", resourceName, rType.TypeName()) + } - // We do not consider it an error if the ADS stream was closed after having received - // a response on the stream. This is because there are legitimate reasons why the server - // may need to close the stream during normal operations, such as needing to rebalance - // load or the underlying connection hitting its max connection age limit. - // See gRFC A57 for more details. - if xdsresource.ErrType(err) == xdsresource.ErrTypeStreamFailedAfterRecv { - a.logger.Warningf("Watchers not notified since ADS stream failed after having received at least one response: %v", err) + resourceStates := a.resources[rType] + if resourceStates == nil { + if a.logger.V(2) { + a.logger.Infof("Resource %q of type %s currently not being watched", resourceName, rType.TypeName()) + } return } - - for _, rType := range a.resources { - for _, state := range rType { - // Propagate the connection error from the transport layer to all watchers. - for watcher := range state.watchers { - watcher := watcher - a.serializer.TrySchedule(func(context.Context) { - watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), func() {}) - }) - } + state, ok := resourceStates[resourceName] + if !ok { + if a.logger.V(2) { + a.logger.Infof("Resource %q of type %s currently not being watched", resourceName, rType.TypeName()) } + return } -} - -// Increments the reference count. Caller must hold parent's authorityMu. -func (a *authority) refLocked() { - a.refCount++ -} - -// Decrements the reference count. Caller must hold parent's authorityMu. -func (a *authority) unrefLocked() int { - a.refCount-- - return a.refCount -} -func (a *authority) close() { - a.transport.Close() - - a.resourcesMu.Lock() - a.closed = true - a.resourcesMu.Unlock() + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + a.watcherCallbackSerializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) + } } +// watchResource registers a new watcher for the specified resource type and +// name. It returns a function that can be called to cancel the watch. +// +// If this is the first watch for any resource on this authority, an xdsChannel +// to the first management server (from the list of server configurations) will +// be created. +// +// If this is the first watch for the given resource name, it will subscribe to +// the resource with the xdsChannel. If a cached copy of the resource exists, it +// will immediately notify the new watcher. When the last watcher for a resource +// is removed, it will unsubscribe the resource from the xdsChannel. func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { - if a.logger.V(2) { - a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName) - } - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - - // Lookup the ResourceType specific resources from the top-level map. If - // there is no entry for this ResourceType, create one. 
- resources := a.resources[rType] - if resources == nil { - resources = make(map[string]*resourceState) - a.resources[rType] = resources - } + cleanup := func() {} + done := make(chan struct{}) - // Lookup the resourceState for the particular resource that the watch is - // being registered for. If this is the first watch for this resource, - // instruct the transport layer to send a DiscoveryRequest for the same. - state := resources[resourceName] - if state == nil { - if a.logger.V(2) { - a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName) - } - state = &resourceState{ - watchers: make(map[xdsresource.ResourceWatcher]bool), - md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, - wState: watchStateStarted, - } - resources[resourceName] = state - a.sendDiscoveryRequestLocked(rType, resources) - } - // Always add the new watcher to the set of watchers. - state.watchers[watcher] = true + a.xdsClientSerializer.TrySchedule(func(context.Context) { + defer close(done) - // If we have a cached copy of the resource, notify the new watcher. - if state.cache != nil { if a.logger.V(2) { - a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) + a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName) } - resource := state.cache - a.serializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) }) - } - return func() { - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() + xdsChannel := a.xdsChannelToUse() + if xdsChannel == nil { + return + } - // We already have a reference to the resourceState for this particular - // resource. Avoid indexing into the two-level map to figure this out. + // Lookup the entry for the resource type in the top-level map. If there is + // no entry for this resource type, create one. + resources := a.resources[rType] + if resources == nil { + resources = make(map[string]*resourceState) + a.resources[rType] = resources + } - // Delete this particular watcher from the list of watchers, so that its - // callback will not be invoked in the future. - state.wState = watchStateCanceled - delete(state.watchers, watcher) - if len(state.watchers) > 0 { - return + // Lookup the resource state for the particular resource name that the watch + // is being registered for. If this is the first watch for this resource + // name, request it from the management server. + state := resources[resourceName] + if state == nil { + if a.logger.V(2) { + a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName) + } + state = &resourceState{ + watchers: make(map[xdsresource.ResourceWatcher]bool), + md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, + xdsChannelConfigs: []*xdsChannelWithConfig{xdsChannel}, + } + resources[resourceName] = state + xdsChannel.xc.subscribe(rType, resourceName) } + // Always add the new watcher to the set of watchers. + state.watchers[watcher] = true - // There are no more watchers for this resource, delete the state - // associated with it, and instruct the transport to send a request - // which does not include this resource name. - if a.logger.V(2) { - a.logger.Infof("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) + // If we have a cached copy of the resource, notify the new watcher. 
+		if state.cache != nil {
+			if a.logger.V(2) {
+				a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON())
+			}
+			resource := state.cache
+			a.watcherCallbackSerializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) })
 		}
-		delete(resources, resourceName)
-		a.sendDiscoveryRequestLocked(rType, resources)
-	}
+		cleanup = a.unwatchResource(rType, resourceName, watcher)
+	})
+	<-done
+	return cleanup
 }
 
-func (a *authority) handleWatchTimerExpiryLocked(rType xdsresource.Type, resourceName string, state *resourceState) {
-	if a.closed {
-		return
-	}
-	a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName())
+func (a *authority) unwatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() {
+	return grpcsync.OnceFunc(func() {
+		done := make(chan struct{})
+		a.xdsClientSerializer.ScheduleOr(func(context.Context) {
+			defer close(done)
 
-	switch state.wState {
-	case watchStateRequested:
-		// This is the only state where we need to handle the timer expiry by
-		// invoking appropriate watch callbacks. This is handled outside the switch.
-	case watchStateCanceled:
-		return
-	default:
-		a.logger.Warningf("Unexpected watch state %q for resource %q.", state.wState, resourceName)
-		return
-	}
+			if a.logger.V(2) {
+				a.logger.Infof("Canceling a watch for type %q, resource name %q", rType.TypeName(), resourceName)
+			}
 
-	state.wState = watchStateTimeout
-	// With the watch timer firing, it is safe to assume that the resource does
-	// not exist on the management server.
-	state.cache = nil
-	state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}
-	for watcher := range state.watchers {
-		watcher := watcher
-		a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) })
-	}
-}
+			// Lookup the resource type from the resource cache. The entry is
+			// guaranteed to be present, since *we* were the ones who added it in
+			// there when the watch was registered.
+			resources := a.resources[rType]
+			state := resources[resourceName]
+
+			// Delete this particular watcher from the list of watchers, so that its
+			// callback will not be invoked in the future.
+			delete(state.watchers, watcher)
+			if len(state.watchers) > 0 {
+				if a.logger.V(2) {
+					a.logger.Infof("%d more watchers exist for type %q, resource name %q", len(state.watchers), rType.TypeName(), resourceName)
+				}
+				return
+			}
 
-func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) {
-	a.resourcesMu.Lock()
-	defer a.resourcesMu.Unlock()
+			// There are no more watchers for this resource. Unsubscribe this
+			// resource from all channels where it was subscribed to and delete
+			// the state associated with it.
+			if a.logger.V(2) {
+				a.logger.Infof("Removing last watch for resource name %q", resourceName)
+			}
+			for _, xc := range state.xdsChannelConfigs {
+				xc.xc.unsubscribe(rType, resourceName)
+			}
+			delete(resources, resourceName)
 
-	if a.closed {
-		return
-	}
-	resourceStates := a.resources[rType]
-	state, ok := resourceStates[resourceName]
-	if !ok {
-		return
-	}
-	// if watchStateTimeout already triggered resource not found above from
-	// normal watch expiry.
-	if state.wState == watchStateCanceled || state.wState == watchStateTimeout {
-		return
-	}
-	state.wState = watchStateTimeout
-	state.cache = nil
-	state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}
-	for watcher := range state.watchers {
-		watcher := watcher
-		a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) })
-	}
+			// If there are no more watchers for this resource type, delete the
+			// resource type from the top-level map.
+			if len(resources) == 0 {
+				if a.logger.V(2) {
+					a.logger.Infof("Removing last watch for resource type %q", rType.TypeName())
+				}
+				delete(a.resources, rType)
+			}
+			// If there are no more watchers for any resource type, release the
+			// reference to the xdsChannels.
+			if len(a.resources) == 0 {
+				if a.logger.V(2) {
+					a.logger.Infof("Removing last watch for any resource type, releasing reference to the xdsChannel")
+				}
+				a.closeXDSChannels()
+			}
+		}, func() { close(done) })
+		<-done
+	})
 }
 
-// sendDiscoveryRequestLocked sends a discovery request for the specified
-// resource type and resource names. Even though this method does not directly
-// access the resource cache, it is important that `resourcesMu` be held when
-// calling this method to ensure that a consistent snapshot of resource names is
-// being requested.
-func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) {
-	resourcesToRequest := make([]string, len(resources))
-	i := 0
-	for name := range resources {
-		resourcesToRequest[i] = name
-		i++
+// xdsChannelToUse returns the xdsChannel to use for communicating with the
+// management server. If an active channel is available, it returns that.
+// Otherwise, it creates a new channel using the first server configuration in
+// the list of configurations, and returns that.
+//
+// Only executed in the context of a serializer callback.
+func (a *authority) xdsChannelToUse() *xdsChannelWithConfig {
+	if a.activeXDSChannel != nil {
+		return a.activeXDSChannel
+	}
+
+	sc := a.xdsChannelConfigs[0].sc
+	xc, cleanup, err := a.getChannelForADS(sc, a)
+	if err != nil {
+		a.logger.Warningf("Failed to create xDS channel: %v", err)
+		return nil
 	}
-	a.transport.SendRequest(rType.TypeURL(), resourcesToRequest)
+	a.xdsChannelConfigs[0].xc = xc
+	a.xdsChannelConfigs[0].cleanup = cleanup
+	a.activeXDSChannel = a.xdsChannelConfigs[0]
+	return a.activeXDSChannel
 }
 
-func (a *authority) reportLoad() (*load.Store, func()) {
-	return a.transport.ReportLoad()
+// closeXDSChannels closes all the xDS channels associated with this authority,
+// when there are no more watchers for any resource type.
+//
+// Only executed in the context of a serializer callback.
+func (a *authority) closeXDSChannels() {
+	for _, xc := range a.xdsChannelConfigs {
+		if xc.cleanup != nil {
+			xc.cleanup()
+			xc.cleanup = nil
+		}
+		xc.xc = nil
+	}
+	a.activeXDSChannel = nil
 }
 
+// dumpResources returns a dump of the resource configuration cached by this
+// authority, for CSDS purposes.
 func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig {
-	a.resourcesMu.Lock()
-	defer a.resourcesMu.Unlock()
+	var ret []*v3statuspb.ClientConfig_GenericXdsConfig
+	done := make(chan struct{})
+
+	a.xdsClientSerializer.TrySchedule(func(context.Context) {
+		defer close(done)
+		ret = a.resourceConfig()
+	})
+	<-done
+	return ret
+}
 
+// resourceConfig returns a slice of GenericXdsConfig objects representing the
+// current state of all resources managed by this authority.
This is used for +// reporting the current state of the xDS client. +// +// Only executed in the context of a serializer callback. +func (a *authority) resourceConfig() []*v3statuspb.ClientConfig_GenericXdsConfig { var ret []*v3statuspb.ClientConfig_GenericXdsConfig for rType, resourceStates := range a.resources { typeURL := rType.TypeURL() @@ -665,6 +629,14 @@ func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig return ret } +func (a *authority) close() { + a.xdsClientSerializerClose() + <-a.xdsClientSerializer.Done() + if a.logger.V(2) { + a.logger.Infof("Closed") + } +} + func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { switch serviceStatus { case xdsresource.ServiceStatusUnknown: @@ -681,28 +653,3 @@ func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.Cli return v3adminpb.ClientResourceStatus_UNKNOWN } } - -func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { - var errStrB strings.Builder - errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) - if len(topLevelErrors) > 0 { - errStrB.WriteString("top level errors: ") - for i, err := range topLevelErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - errStrB.WriteString(err.Error()) - } - } - if len(perResourceErrors) > 0 { - var i int - for name, err := range perResourceErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - i++ - errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) - } - } - return errors.New(errStrB.String()) -} diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go deleted file mode 100644 index bd6ff27bce6c..000000000000 --- a/xds/internal/xdsclient/authority_test.go +++ /dev/null @@ -1,295 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xdsclient - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/google/uuid" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/testutils/xds/e2e" - "google.golang.org/grpc/xds/internal" - - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds/bootstrap" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. -) - -var emptyServerOpts = e2e.ManagementServerOptions{} - -var ( - // Listener resource type implementation retrieved from the resource type map - // in the internal package, which is initialized when the individual resource - // types are created. 
- listenerResourceType = internal.ResourceTypeMapForTesting[version.V3ListenerURL].(xdsresource.Type) - rtRegistry = newResourceTypeRegistry() -) - -func init() { - // Simulating maybeRegister for listenerResourceType. The getter to this registry - // is passed to the authority for accessing the resource type. - rtRegistry.types[listenerResourceType.TypeURL()] = listenerResourceType -} - -func setupTest(ctx context.Context, t *testing.T, opts e2e.ManagementServerOptions, watchExpiryTimeout time.Duration) (*authority, *e2e.ManagementServer, string) { - t.Helper() - nodeID := uuid.New().String() - managementServer := e2e.StartManagementServer(t, opts) - - contents := e2e.DefaultBootstrapContents(t, nodeID, managementServer.Address) - testutils.CreateBootstrapFileForTesting(t, contents) - config, err := bootstrap.GetConfiguration() - if err != nil { - t.Fatalf("Failed to read bootstrap configuration: %v", err) - } - a, err := newAuthority(authorityArgs{ - serverCfg: config.XDSServers()[0], - bootstrapCfg: config, - serializer: grpcsync.NewCallbackSerializer(ctx), - resourceTypeGetter: rtRegistry.get, - watchExpiryTimeout: watchExpiryTimeout, - logger: nil, - }) - if err != nil { - t.Fatalf("Failed to create authority: %q", err) - } - return a, managementServer, nodeID -} - -// This tests verifies watch and timer state for the scenario where a watch for -// an LDS resource is registered and the management server sends an update the -// same resource. -func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - a, ms, nodeID := setupTest(ctx, t, emptyServerOpts, defaultTestTimeout) - defer a.close() - - rn := "xdsclient-test-lds-resource" - w := xdstestutils.NewTestResourceWatcher() - cancelResource := a.watchResource(listenerResourceType, rn, w) - defer cancelResource() - - // Looping until the underlying transport has successfully sent the request to - // the server, which would call the onSend callback and transition the watchState - // to `watchStateRequested`. - for ctx.Err() == nil { - if err := compareWatchState(a, rn, watchStateRequested); err == nil { - break - } - } - if ctx.Err() != nil { - t.Fatalf("Test timed out before state transition to %q was verified.", watchStateRequested) - } - - // Updating mgmt server with the same lds resource. Blocking on watcher's update - // ch to verify the watch state transition to `watchStateReceived`. - if err := updateResourceInServer(ctx, ms, rn, nodeID); err != nil { - t.Fatalf("Failed to update server with resource: %q; err: %q", rn, err) - } - for { - select { - case <-ctx.Done(): - t.Fatal("Test timed out before watcher received an update from server.") - case <-w.ErrorCh: - case <-w.UpdateCh: - // This means the OnUpdate callback was invoked and the watcher was notified. - if err := compareWatchState(a, rn, watchStateReceived); err != nil { - t.Fatal(err) - } - return - } - } -} - -// This tests the resource's watch state transition when the ADS stream is closed -// by the management server. After the test calls `watchResource` api to register -// a watch for a resource, it stops the management server, and verifies the resource's -// watch state transitions to `watchStateStarted` and timer ready to be restarted. 
-func (s) TestTimerAndWatchStateOnErrorCallback(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - a, ms, _ := setupTest(ctx, t, emptyServerOpts, defaultTestTimeout) - defer a.close() - - rn := "xdsclient-test-lds-resource" - w := xdstestutils.NewTestResourceWatcher() - cancelResource := a.watchResource(listenerResourceType, rn, w) - defer cancelResource() - - // Stopping the server and blocking on watcher's err channel to be notified. - // This means the onErr callback should be invoked which transitions the watch - // state to `watchStateStarted`. - ms.Stop() - - select { - case <-ctx.Done(): - t.Fatal("Test timed out before verifying error propagation.") - case err := <-w.ErrorCh: - if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { - t.Fatal("Connection error not propagated to watchers.") - } - } - - if err := compareWatchState(a, rn, watchStateStarted); err != nil { - t.Fatal(err) - } -} - -// This tests the case where the ADS stream breaks after successfully receiving -// a message on the stream. The test performs the following: -// - configures the management server with the ability to dropRequests based on -// a boolean flag. -// - update the mgmt server with resourceA. -// - registers a watch for resourceA and verifies that the watcher's update -// callback is invoked. -// - registers a watch for resourceB and verifies that the watcher's update -// callback is not invoked. This is because the management server does not -// contain resourceB. -// - force mgmt server to drop requests. Verify that watcher for resourceB gets -// connection error. -// - resume mgmt server to accept requests. -// - update the mgmt server with resourceB and verifies that the watcher's -// update callback is invoked. -func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Create a restartable listener which can close existing connections. - l, err := testutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - lis := testutils.NewRestartableListener(l) - defer lis.Close() - streamRestarted := grpcsync.NewEvent() - serverOpt := e2e.ManagementServerOptions{ - Listener: lis, - OnStreamClosed: func(int64, *v3corepb.Node) { - streamRestarted.Fire() - }, - } - - a, ms, nodeID := setupTest(ctx, t, serverOpt, defaultTestTimeout) - defer a.close() - - nameA := "xdsclient-test-lds-resourceA" - watcherA := xdstestutils.NewTestResourceWatcher() - cancelA := a.watchResource(listenerResourceType, nameA, watcherA) - - if err := updateResourceInServer(ctx, ms, nameA, nodeID); err != nil { - t.Fatalf("Failed to update server with resource: %q; err: %q", nameA, err) - } - - // Blocking on resource A watcher's update Channel to verify that there is - // more than one msg(s) received the ADS stream. - select { - case <-ctx.Done(): - t.Fatal("Test timed out before watcher received the update.") - case err := <-watcherA.ErrorCh: - t.Fatalf("Watch got an unexpected error update: %q; want: valid update.", err) - case <-watcherA.UpdateCh: - } - - cancelA() - lis.Stop() - - nameB := "xdsclient-test-lds-resourceB" - watcherB := xdstestutils.NewTestResourceWatcher() - cancelB := a.watchResource(listenerResourceType, nameB, watcherB) - defer cancelB() - - // Blocking on resource B watcher's error channel. 
This error should be due to - // connectivity issue when reconnecting because the mgmt server was already been - // stopped. Also verifying that OnResourceDoesNotExist() method was not invoked - // on the watcher. - select { - case <-ctx.Done(): - t.Fatal("Test timed out before mgmt server got the request.") - case u := <-watcherB.UpdateCh: - t.Fatalf("Watch got an unexpected resource update: %v.", u) - case <-watcherB.ResourceDoesNotExistCh: - t.Fatalf("Illegal invocation of OnResourceDoesNotExist() method on the watcher.") - case gotErr := <-watcherB.ErrorCh: - wantErr := xdsresource.ErrorTypeConnection - if xdsresource.ErrType(gotErr) != wantErr { - t.Fatalf("Watch got an unexpected error:%q. Want: %q.", gotErr, wantErr) - } - } - - // Updating server with resource B and also re-enabling requests on the server. - if err := updateResourceInServer(ctx, ms, nameB, nodeID); err != nil { - t.Fatalf("Failed to update server with resource: %q; err: %q", nameB, err) - } - lis.Restart() - - for { - select { - case <-ctx.Done(): - t.Fatal("Test timed out before watcher received the update.") - case <-watcherB.UpdateCh: - return - } - } -} - -func compareWatchState(a *authority, rn string, wantState watchState) error { - a.resourcesMu.Lock() - defer a.resourcesMu.Unlock() - gotState := a.resources[listenerResourceType][rn].wState - if gotState != wantState { - return fmt.Errorf("Got %v. Want: %v", gotState, wantState) - } - - wTimer := a.resources[listenerResourceType][rn].wTimer - switch gotState { - case watchStateRequested: - if wTimer == nil { - return fmt.Errorf("got nil timer, want active timer") - } - case watchStateStarted: - if wTimer != nil { - return fmt.Errorf("got active timer, want nil timer") - } - default: - if wTimer.Stop() { - // This means that the timer was running but could be successfully stopped. 
- return fmt.Errorf("got active timer, want stopped timer") - } - } - return nil -} - -func updateResourceInServer(ctx context.Context, ms *e2e.ManagementServer, rn string, nID string) error { - l := e2e.DefaultClientListener(rn, "new-rds-resource") - resources := e2e.UpdateOptions{ - NodeID: nID, - Listeners: []*v3listenerpb.Listener{l}, - SkipValidation: true, - } - return ms.Update(ctx, resources) -} diff --git a/xds/internal/xdsclient/channel.go b/xds/internal/xdsclient/channel.go index 4a1e73c943b1..383720e645cf 100644 --- a/xds/internal/xdsclient/channel.go +++ b/xds/internal/xdsclient/channel.go @@ -20,6 +20,7 @@ package xdsclient import ( "errors" "fmt" + "strings" "time" "google.golang.org/grpc/grpclog" @@ -303,3 +304,39 @@ func decodeResponse(opts *xdsresource.DecodeOptions, rType xdsresource.Type, res } return ret, md, errRet } + +func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { + var errStrB strings.Builder + errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) + if len(topLevelErrors) > 0 { + errStrB.WriteString("top level errors: ") + for i, err := range topLevelErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + errStrB.WriteString(err.Error()) + } + } + if len(perResourceErrors) > 0 { + var i int + for name, err := range perResourceErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + i++ + errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) + } + } + return errors.New(errStrB.String()) +} + +func (xc *xdsChannel) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) error { + if xc.closed.HasFired() { + return fmt.Errorf("triggerResourceNotFoundForTesting() called on a closed channel") + } + if xc.logger.V(2) { + xc.logger.Infof("Triggering resource not found for type: %s, resource name: %s", rType.TypeName(), resourceName) + } + xc.ads.TriggerResourceNotFoundForTesting(rType, resourceName) + return nil +} diff --git a/xds/internal/xdsclient/channel_test.go b/xds/internal/xdsclient/channel_test.go index 5d57d50a1847..670e76c57dc5 100644 --- a/xds/internal/xdsclient/channel_test.go +++ b/xds/internal/xdsclient/channel_test.go @@ -78,9 +78,9 @@ func xdsChannelForTest(t *testing.T, serverURI, nodeID string, watchExpiryTimeou // management server. contents, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{ Servers: []byte(fmt.Sprintf(`[{ - "server_uri": %q, - "channel_creds": [{"type": "insecure"}] - }]`, serverURI)), + "server_uri": %q, + "channel_creds": [{"type": "insecure"}] + }]`, serverURI)), Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)), }) if err != nil { diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go index 5e11f557b234..839cf23833b3 100644 --- a/xds/internal/xdsclient/client_new.go +++ b/xds/internal/xdsclient/client_new.go @@ -29,6 +29,9 @@ import ( "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/bootstrap" + xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" + "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" + "google.golang.org/grpc/xds/internal/xdsclient/transport/grpctransport" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -54,24 +57,48 @@ const NameForServer = "#server" // only when all references are released, and it is safe for the caller to // invoke this close function multiple times. 
func New(name string) (XDSClient, func(), error) { - return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout, backoff.DefaultExponential.Backoff) + return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleChannelExpiryTimeout, backoff.DefaultExponential.Backoff) } // newClientImpl returns a new xdsClient with the given config. -func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration, streamBackoff func(int) time.Duration) (*clientImpl, error) { +func newClientImpl(config *bootstrap.Config, watchExpiryTimeout, idleChannelExpiryTimeout time.Duration, streamBackoff func(int) time.Duration) (*clientImpl, error) { ctx, cancel := context.WithCancel(context.Background()) c := &clientImpl{ done: grpcsync.NewEvent(), + authorities: make(map[string]*authority), config: config, watchExpiryTimeout: watchExpiryTimeout, backoff: streamBackoff, serializer: grpcsync.NewCallbackSerializer(ctx), serializerClose: cancel, + transportBuilder: &grpctransport.Builder{}, resourceTypes: newResourceTypeRegistry(), - authorities: make(map[string]*authority), - idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), + xdsActiveChannels: make(map[string]*channelState), + xdsIdleChannels: cache.NewTimeoutCache(idleChannelExpiryTimeout), } + for name, cfg := range config.Authorities() { + // If server configs are specified in the authorities map, use that. + // Else, use the top-level server configs. + serverCfg := config.XDSServers() + if len(cfg.XDSServers) >= 1 { + serverCfg = cfg.XDSServers + } + c.authorities[name] = newAuthority(authorityBuildOptions{ + serverConfigs: serverCfg, + name: name, + serializer: c.serializer, + getChannelForADS: c.getChannelForADS, + logPrefix: clientPrefix(c), + }) + } + c.topLevelAuthority = newAuthority(authorityBuildOptions{ + serverConfigs: config.XDSServers(), + name: "", + serializer: c.serializer, + getChannelForADS: c.getChannelForADS, + logPrefix: clientPrefix(c), + }) c.logger = prefixLogger(c) return c, nil } @@ -89,13 +116,13 @@ type OptionsForTesting struct { // unspecified, uses the default value used in non-test code. WatchExpiryTimeout time.Duration - // AuthorityIdleTimeout is the timeout before idle authorities are deleted. - // If unspecified, uses the default value used in non-test code. - AuthorityIdleTimeout time.Duration + // IdleChannelExpiryTimeout is the timeout before idle xdsChannels are + // deleted. If unspecified, uses the default value used in non-test code. + IdleChannelExpiryTimeout time.Duration // StreamBackoffAfterFailure is the backoff function used to determine the - // backoff duration after stream failures. If unspecified, uses the default - // value used in non-test code. + // backoff duration after stream failures. + // If unspecified, uses the default value used in non-test code. 
StreamBackoffAfterFailure func(int) time.Duration } @@ -115,8 +142,8 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { if opts.WatchExpiryTimeout == 0 { opts.WatchExpiryTimeout = defaultWatchExpiryTimeout } - if opts.AuthorityIdleTimeout == 0 { - opts.AuthorityIdleTimeout = defaultIdleAuthorityDeleteTimeout + if opts.IdleChannelExpiryTimeout == 0 { + opts.IdleChannelExpiryTimeout = defaultIdleChannelExpiryTimeout } if opts.StreamBackoffAfterFailure == nil { opts.StreamBackoffAfterFailure = defaultStreamBackoffFunc @@ -125,7 +152,7 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil { return nil, nil, err } - client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout, opts.StreamBackoffAfterFailure) + client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.IdleChannelExpiryTimeout, opts.StreamBackoffAfterFailure) return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err } @@ -152,6 +179,8 @@ func GetForTesting(name string) (XDSClient, func(), error) { func init() { internal.TriggerXDSResourceNotFoundForTesting = triggerXDSResourceNotFoundForTesting + xdsclientinternal.ResourceWatchStateForTesting = resourceWatchStateForTesting + } func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type, name string) error { @@ -162,6 +191,14 @@ func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type return crc.clientImpl.triggerResourceNotFoundForTesting(typ, name) } +func resourceWatchStateForTesting(client XDSClient, typ xdsresource.Type, name string) (ads.ResourceWatchState, error) { + crc, ok := client.(*clientRefCounted) + if !ok { + return ads.ResourceWatchState{}, fmt.Errorf("xDS client is of type %T, want %T", client, &clientRefCounted{}) + } + return crc.clientImpl.resourceWatchStateForTesting(typ, name) +} + var ( clients = map[string]*clientRefCounted{} clientsMu sync.Mutex diff --git a/xds/internal/xdsclient/client_refcounted.go b/xds/internal/xdsclient/client_refcounted.go index a8c7213aea8e..4bb2cc3f3298 100644 --- a/xds/internal/xdsclient/client_refcounted.go +++ b/xds/internal/xdsclient/client_refcounted.go @@ -29,8 +29,8 @@ import ( ) const ( - defaultWatchExpiryTimeout = 15 * time.Second - defaultIdleAuthorityDeleteTimeout = 5 * time.Minute + defaultWatchExpiryTimeout = 15 * time.Second + defaultIdleChannelExpiryTimeout = 5 * time.Minute ) var ( @@ -63,7 +63,7 @@ func clientRefCountedClose(name string) { // newRefCounted creates a new reference counted xDS client implementation for // name, if one does not exist already. If an xDS client for the given name // exists, it gets a reference to it and returns it. 
-func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Duration, streamBackoff func(int) time.Duration) (XDSClient, func(), error) { +func newRefCounted(name string, watchExpiryTimeout, idleChannelExpiryTimeout time.Duration, streamBackoff func(int) time.Duration) (XDSClient, func(), error) { clientsMu.Lock() defer clientsMu.Unlock() @@ -77,7 +77,7 @@ func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Du if err != nil { return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) } - c, err := newClientImpl(config, watchExpiryTimeout, idleAuthorityTimeout, streamBackoff) + c, err := newClientImpl(config, watchExpiryTimeout, idleChannelExpiryTimeout, streamBackoff) if err != nil { return nil, nil, err } diff --git a/xds/internal/xdsclient/clientimpl.go b/xds/internal/xdsclient/clientimpl.go index 715b1d61548b..df0949e23cc7 100644 --- a/xds/internal/xdsclient/clientimpl.go +++ b/xds/internal/xdsclient/clientimpl.go @@ -19,48 +19,124 @@ package xdsclient import ( + "errors" + "fmt" "sync" + "sync/atomic" "time" "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) var _ XDSClient = &clientImpl{} +// ErrClientClosed is returned when the xDS client is closed. +var ErrClientClosed = errors.New("xds: the xDS client is closed") + // clientImpl is the real implementation of the xds client. The exported Client // is a wrapper of this struct with a ref count. type clientImpl struct { - done *grpcsync.Event - config *bootstrap.Config - logger *grpclog.PrefixLogger - watchExpiryTimeout time.Duration - backoff func(int) time.Duration // Backoff for ADS and LRS stream failures. - serializer *grpcsync.CallbackSerializer - serializerClose func() - resourceTypes *resourceTypeRegistry - - // authorityMu protects the authority fields. It's necessary because an - // authority is created when it's used. - authorityMu sync.Mutex - // authorities is a map from ServerConfig to authority. So that - // different authorities sharing the same ServerConfig can share the - // authority. - // - // The key is **ServerConfig.String()**, not the authority name. - // - // An authority is either in authorities, or idleAuthorities, - // never both. - authorities map[string]*authority - // idleAuthorities keeps the authorities that are not used (the last - // watch on it was canceled). They are kept in the cache and will be deleted - // after a timeout. The key is ServerConfig.String(). + // The following fields are initialized at creation time and are read-only + // after that, and therefore can be accessed without a mutex. + done *grpcsync.Event // Fired when the client is closed. + topLevelAuthority *authority // The top-level authority, used only for old-style names without an authority. + authorities map[string]*authority // Map from authority names in bootstrap to authority struct. + config *bootstrap.Config // Complete bootstrap configuration. + watchExpiryTimeout time.Duration // Expiry timeout for ADS watch. + backoff func(int) time.Duration // Backoff for ADS and LRS stream failures. + transportBuilder transport.Builder // Builder to create transports to xDS server. 
+	resourceTypes      *resourceTypeRegistry        // Registry of resource types, for parsing incoming ADS responses.
+	serializer         *grpcsync.CallbackSerializer // Serializer for invoking resource watcher callbacks.
+	serializerClose    func()                       // Function to close the serializer.
+	logger             *grpclog.PrefixLogger        // Logger for this client.
+
+	// The clientImpl owns a bunch of channels to individual xDS servers
+	// specified in the bootstrap configuration. Authorities acquire references
+	// to these channels based on server configs within the authority config.
+	// The clientImpl maintains a list of interested authorities for each of
+	// these channels, and forwards updates from the channels to each of these
+	// authorities.
 	//
-	// An authority is either in authorities, or idleAuthorities,
-	// never both.
-	idleAuthorities *cache.TimeoutCache
+	// Once all references to a channel are dropped, the channel is moved to the
+	// idle cache where it lives for a configured duration before being closed.
+	// If the channel is required before the idle timeout fires, it is revived
+	// from the idle cache and used.
+	channelsMu        sync.Mutex
+	xdsActiveChannels map[string]*channelState // Map from server config to in-use xdsChannels.
+	xdsIdleChannels   *cache.TimeoutCache      // Map from server config to idle xdsChannels.
+	closeCond         *sync.Cond
+}
+
+// channelState represents the state of an xDS channel. It tracks the number of
+// LRS references, the authorities interested in the channel, and the server
+// configuration used for the channel.
+//
+// It receives callbacks for events on the underlying ADS stream and invokes
+// corresponding callbacks on interested authorities.
+type channelState struct {
+	parent       *clientImpl
+	serverConfig *bootstrap.ServerConfig
+
+	// Access to the following fields should be protected by the parent's
+	// channelsMu.
+	channel               *xdsChannel
+	lrsRefs               int
+	interestedAuthorities map[*authority]bool
+}
+
+func (cs *channelState) adsStreamFailure(err error) {
+	if cs.parent.done.HasFired() {
+		return
+	}
+
+	cs.parent.channelsMu.Lock()
+	defer cs.parent.channelsMu.Unlock()
+	for authority := range cs.interestedAuthorities {
+		authority.adsStreamFailure(cs.serverConfig, err)
+	}
+}
+
+func (cs *channelState) adsResourceUpdate(typ xdsresource.Type, updates map[string]ads.DataAndErrTuple, md xdsresource.UpdateMetadata, onDone func()) {
+	if cs.parent.done.HasFired() {
+		return
+	}
+
+	cs.parent.channelsMu.Lock()
+	defer cs.parent.channelsMu.Unlock()
+
+	if len(cs.interestedAuthorities) == 0 {
+		onDone()
+		return
+	}
+
+	authorityCnt := new(atomic.Int64)
+	authorityCnt.Add(int64(len(cs.interestedAuthorities)))
+	done := func() {
+		if authorityCnt.Add(-1) == 0 {
+			onDone()
+		}
+	}
+	for authority := range cs.interestedAuthorities {
+		authority.adsResourceUpdate(cs.serverConfig, typ, updates, md, done)
+	}
+}
+
+func (cs *channelState) adsResourceDoesNotExist(typ xdsresource.Type, resourceName string) {
+	if cs.parent.done.HasFired() {
+		return
+	}
+
+	cs.parent.channelsMu.Lock()
+	defer cs.parent.channelsMu.Unlock()
+	for authority := range cs.interestedAuthorities {
+		authority.adsResourceDoesNotExist(typ, resourceName)
+	}
 }
 
 // BootstrapConfig returns the configuration read from the bootstrap file.
@@ -69,22 +145,51 @@ func (c *clientImpl) BootstrapConfig() *bootstrap.Config {
 	return c.config
 }
 
-// close closes the gRPC connection to the management server.
+// close closes the xDS client and releases all resources.
func (c *clientImpl) close() { if c.done.HasFired() { return } c.done.Fire() - // TODO: Should we invoke the registered callbacks here with an error that - // the client is closed? - c.authorityMu.Lock() + c.topLevelAuthority.close() for _, a := range c.authorities { a.close() } - c.idleAuthorities.Clear(true) - c.authorityMu.Unlock() + + // Channel close cannot be invoked with the lock held, because it can race + // with stream failure happening at the same time. The latter will callback + // into the clientImpl and will attempt to grab the lock. This will result + // in a deadlock. So instead, we release the lock and wait for all active + // channels to be closed. + var channelsToClose []*xdsChannel + c.channelsMu.Lock() + for _, cs := range c.xdsActiveChannels { + channelsToClose = append(channelsToClose, cs.channel) + } + c.xdsActiveChannels = nil + c.channelsMu.Unlock() + for _, c := range channelsToClose { + c.close() + } + + // Similarly, closing idle channels cannot be done with the lock held, for + // the same reason as described above. So, we clear the idle cache in a + // goroutine and use a condition variable to wait on the condition that the + // idle cache has zero entries. The Wait() method on the condition variable + // releases the lock and blocks the goroutine until signaled (which happens + // when an idle channel is removed from the cache and closed), and grabs the + // lock before returning. + c.channelsMu.Lock() + c.closeCond = sync.NewCond(&c.channelsMu) + go c.xdsIdleChannels.Clear(true) + for c.xdsIdleChannels.Len() > 0 { + c.closeCond.Wait() + } + c.channelsMu.Unlock() + c.serializerClose() + <-c.serializer.Done() for _, s := range c.config.XDSServers() { for _, f := range s.Cleanups() { @@ -100,3 +205,207 @@ func (c *clientImpl) close() { } c.logger.Infof("Shutdown") } + +// getChannelForADS returns an xdsChannel for the given server configuration. +// +// If an xdsChannel exists for the given server configuration, it is returned. +// Else a new one is created. It also ensures that the calling authority is +// added to the set of interested authorities for the returned channel. +// +// It returns the xdsChannel and a function to release the calling authority's +// reference on the channel. The caller must call the cancel function when it is +// no longer interested in this channel. +// +// A non-nil error is returned if an xdsChannel was not created. +func (c *clientImpl) getChannelForADS(serverConfig *bootstrap.ServerConfig, callingAuthority *authority) (*xdsChannel, func(), error) { + if c.done.HasFired() { + return nil, nil, ErrClientClosed + } + + initLocked := func(s *channelState) { + if c.logger.V(2) { + c.logger.Infof("Adding authority %q to the set of interested authorities for channel [%p]", callingAuthority.name, s.channel) + } + s.interestedAuthorities[callingAuthority] = true + } + deInitLocked := func(s *channelState) { + if c.logger.V(2) { + c.logger.Infof("Removing authority %q from the set of interested authorities for channel [%p]", callingAuthority.name, s.channel) + } + delete(s.interestedAuthorities, callingAuthority) + } + + return c.getOrCreateChannel(serverConfig, initLocked, deInitLocked) +} + +// getChannelForLRS returns an xdsChannel for the given server configuration. +// +// If an xdsChannel exists for the given server configuration, it is returned. +// Else a new one is created. A reference count that tracks the number of LRS +// calls on the returned channel is incremented before returning the channel. 
+// +// It returns the xdsChannel and a function to decrement the reference count +// that tracks the number of LRS calls on the returned channel. The caller must +// call the cancel function when it is no longer interested in this channel. +// +// A non-nil error is returned if an xdsChannel was not created. +func (c *clientImpl) getChannelForLRS(serverConfig *bootstrap.ServerConfig) (*xdsChannel, func(), error) { + if c.done.HasFired() { + return nil, nil, ErrClientClosed + } + + initLocked := func(s *channelState) { s.lrsRefs++ } + deInitLocked := func(s *channelState) { s.lrsRefs-- } + + return c.getOrCreateChannel(serverConfig, initLocked, deInitLocked) +} + +// getOrCreateChannel returns an xdsChannel for the given server configuration. +// +// If an active xdsChannel exists for the given server configuration, it is +// returned. If an idle xdsChannel exists for the given server configuration, it +// is revived from the idle cache and returned. Else a new one is created. +// +// The initLocked function runs some initialization logic before the channel is +// returned. This includes adding the calling authority to the set of interested +// authorities for the channel or incrementing the count of the number of LRS +// calls on the channel. +// +// The deInitLocked function runs some cleanup logic when the returned cleanup +// function is called. This involves removing the calling authority from the set +// of interested authorities for the channel or decrementing the count of the +// number of LRS calls on the channel. +// +// Both initLocked and deInitLocked are called with the c.channelsMu held. +// +// Returns the xdsChannel and a cleanup function to be invoked when the channel +// is no longer required. A non-nil error is returned if an xdsChannel was not +// created. +func (c *clientImpl) getOrCreateChannel(serverConfig *bootstrap.ServerConfig, initLocked, deInitLocked func(*channelState)) (*xdsChannel, func(), error) { + c.channelsMu.Lock() + defer c.channelsMu.Unlock() + + if c.logger.V(2) { + c.logger.Infof("Received request for a reference to an xdsChannel for server config %q", serverConfig) + } + + // Use an active channel, if one exists for this server config. + if state, ok := c.xdsActiveChannels[serverConfig.String()]; ok { + if c.logger.V(2) { + c.logger.Infof("Reusing an active xdsChannel for server config %q", serverConfig) + } + initLocked(state) + return state.channel, c.releaseChannel(serverConfig, state, deInitLocked), nil + } + + // If an idle channel exists for this server config, remove it from the + // idle cache and add it to the map of active channels, and return it. + if s, ok := c.xdsIdleChannels.Remove(serverConfig.String()); ok { + if c.logger.V(2) { + c.logger.Infof("Reviving an xdsChannel from the idle cache for server config %q", serverConfig) + } + state := s.(*channelState) + c.xdsActiveChannels[serverConfig.String()] = state + initLocked(state) + return state.channel, c.releaseChannel(serverConfig, state, deInitLocked), nil + } + + if c.logger.V(2) { + c.logger.Infof("Creating a new xdsChannel for server config %q", serverConfig) + } + + // Create a new transport and create a new xdsChannel, and add it to the + // map of xdsChannels. 
+	tr, err := c.transportBuilder.Build(transport.BuildOptions{ServerConfig: serverConfig})
+	if err != nil {
+		return nil, func() {}, fmt.Errorf("failed to create transport for server config %s: %v", serverConfig, err)
+	}
+	state := &channelState{
+		parent:                c,
+		serverConfig:          serverConfig,
+		interestedAuthorities: make(map[*authority]bool),
+	}
+	channel, err := newXDSChannel(xdsChannelOpts{
+		transport:          tr,
+		serverConfig:       serverConfig,
+		bootstrapConfig:    c.config,
+		resourceTypeGetter: c.resourceTypes.get,
+		eventHandler:       state,
+		backoff:            c.backoff,
+		watchExpiryTimeout: c.watchExpiryTimeout,
+		logPrefix:          clientPrefix(c),
+	})
+	if err != nil {
+		return nil, func() {}, fmt.Errorf("failed to create xdsChannel for server config %s: %v", serverConfig, err)
+	}
+	state.channel = channel
+	c.xdsActiveChannels[serverConfig.String()] = state
+	initLocked(state)
+	return state.channel, c.releaseChannel(serverConfig, state, deInitLocked), nil
+}
+
+// releaseChannel is a function that is called when a reference to an xdsChannel
+// needs to be released. It handles the logic of moving the channel to an idle
+// cache if there are no other active references, and closing the channel if it
+// remains in the idle cache for the configured duration.
+//
+// The function takes the following parameters:
+// - serverConfig: the server configuration for the xdsChannel
+// - state: the state of the xdsChannel
+// - deInitLocked: a function that performs any necessary cleanup for the xdsChannel
+//
+// The function returns another function that can be called to release the
+// reference to the xdsChannel. This returned function is idempotent, meaning
+// it can be called multiple times without any additional effect.
+func (c *clientImpl) releaseChannel(serverConfig *bootstrap.ServerConfig, state *channelState, deInitLocked func(*channelState)) func() {
+	return grpcsync.OnceFunc(func() {
+		c.channelsMu.Lock()
+		defer c.channelsMu.Unlock()
+
+		if c.logger.V(2) {
+			c.logger.Infof("Received request to release a reference to an xdsChannel for server config %q", serverConfig)
+		}
+		deInitLocked(state)
+
+		// The channel has active users. Do nothing and return.
+		if state.lrsRefs != 0 || len(state.interestedAuthorities) != 0 {
+			if c.logger.V(2) {
+				c.logger.Infof("xdsChannel %p has other active references", state.channel)
+			}
+			return
+		}
+
+		// Move the channel to the idle cache instead of closing
+		// immediately. If the channel remains in the idle cache for
+		// the configured duration, it will get closed.
+		delete(c.xdsActiveChannels, serverConfig.String())
+		if c.logger.V(2) {
+			c.logger.Infof("Moving xdsChannel [%p] for server config %s to the idle cache", state.channel, serverConfig)
+		}
+
+		// The idle cache expiry timeout results in the channel getting
+		// closed in another serializer callback.
+		c.xdsIdleChannels.Add(serverConfig.String(), state, grpcsync.OnceFunc(func() {
+			c.channelsMu.Lock()
+			channelToClose := state.channel
+			c.channelsMu.Unlock()
+
+			if c.logger.V(2) {
+				c.logger.Infof("Idle cache expiry timeout fired for xdsChannel [%p] for server config %s", state.channel, serverConfig)
+			}
+			channelToClose.close()
+
+			// If the channel is being closed as a result of the xDS client
+			// being closed, closeCond is non-nil and we need to signal from
+			// here to unblock Close(). Holding the lock is not necessary
+			// to call Signal() on a condition variable. But the field
+			// `c.closeCond` needs to be guarded by the lock, which is why we
+			// acquire it here.
+ c.channelsMu.Lock() + if c.closeCond != nil { + c.closeCond.Signal() + } + c.channelsMu.Unlock() + })) + }) +} diff --git a/xds/internal/xdsclient/clientimpl_authority.go b/xds/internal/xdsclient/clientimpl_authority.go deleted file mode 100644 index 56c26b81754c..000000000000 --- a/xds/internal/xdsclient/clientimpl_authority.go +++ /dev/null @@ -1,146 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package xdsclient - -import ( - "errors" - "fmt" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -// findAuthority returns the authority for this name. If it doesn't already -// exist, one will be created. -// -// Note that this doesn't always create new authority. authorities with the same -// config but different names are shared. -// -// The returned unref function must be called when the caller is done using this -// authority, without holding c.authorityMu. -// -// Caller must not hold c.authorityMu. -func (c *clientImpl) findAuthority(n *xdsresource.Name) (*authority, func(), error) { - scheme, authority := n.Scheme, n.Authority - - c.authorityMu.Lock() - defer c.authorityMu.Unlock() - if c.done.HasFired() { - return nil, nil, errors.New("the xds-client is closed") - } - - config := c.config.XDSServers()[0] - if scheme == xdsresource.FederationScheme { - authorities := c.config.Authorities() - if authorities == nil { - return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) - } - cfg, ok := authorities[authority] - if !ok { - return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) - } - if len(cfg.XDSServers) >= 1 { - config = cfg.XDSServers[0] - } - } - - a, err := c.newAuthorityLocked(config) - if err != nil { - return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err) - } - // All returned authority from this function will be used by a watch, - // holding the ref here. - // - // Note that this must be done while c.authorityMu is held, to avoid the - // race that an authority is returned, but before the watch starts, the - // old last watch is canceled (in another goroutine), causing this - // authority to be removed, and then a watch will start on a removed - // authority. - // - // unref() will be done when the watch is canceled. - a.refLocked() - return a, func() { c.unrefAuthority(a) }, nil -} - -// newAuthorityLocked creates a new authority for the given config. If an -// authority for the given config exists in the cache, it is returned instead of -// creating a new one. -// -// The caller must take a reference of the returned authority before using, and -// unref afterwards. -// -// caller must hold c.authorityMu -func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) { - // First check if there's already an authority for this config. 
If found, it
-	// means this authority is used by other watches (could be the same
-	// authority name, or a different authority name but the same server
-	// config). Return it.
-	configStr := config.String()
-	if a, ok := c.authorities[configStr]; ok {
-		return a, nil
-	}
-	// Second check if there's an authority in the idle cache. If found, it
-	// means this authority was created, but moved to the idle cache because the
-	// watch was canceled. Move it from idle cache to the authority cache, and
-	// return.
-	if old, ok := c.idleAuthorities.Remove(configStr); ok {
-		oldA, _ := old.(*authority)
-		if oldA != nil {
-			c.authorities[configStr] = oldA
-			return oldA, nil
-		}
-	}
-
-	// Make a new authority since there's no existing authority for this config.
-	ret, err := newAuthority(authorityArgs{
-		serverCfg:          config,
-		bootstrapCfg:       c.config,
-		serializer:         c.serializer,
-		resourceTypeGetter: c.resourceTypes.get,
-		watchExpiryTimeout: c.watchExpiryTimeout,
-		backoff:            c.backoff,
-		logger:             grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI())),
-	})
-	if err != nil {
-		return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err)
-	}
-	// Add it to the cache, so it will be reused.
-	c.authorities[configStr] = ret
-	return ret, nil
-}
-
-// unrefAuthority unrefs the authority. It also moves the authority to idle
-// cache if it's ref count is 0.
-//
-// This function doesn't need to called explicitly. It's called by the returned
-// unref from findAuthority().
-//
-// Caller must not hold c.authorityMu.
-func (c *clientImpl) unrefAuthority(a *authority) {
-	c.authorityMu.Lock()
-	defer c.authorityMu.Unlock()
-	if a.unrefLocked() > 0 {
-		return
-	}
-	configStr := a.serverCfg.String()
-	delete(c.authorities, configStr)
-	c.idleAuthorities.Add(configStr, a, func() {
-		a.close()
-	})
-}
diff --git a/xds/internal/xdsclient/clientimpl_dump.go b/xds/internal/xdsclient/clientimpl_dump.go
index f4d7b0a0115c..9d7586773046 100644
--- a/xds/internal/xdsclient/clientimpl_dump.go
+++ b/xds/internal/xdsclient/clientimpl_dump.go
@@ -24,10 +24,7 @@ import (
 
 // dumpResources returns the status and contents of all xDS resources.
 func (c *clientImpl) dumpResources() *v3statuspb.ClientConfig {
-	c.authorityMu.Lock()
-	defer c.authorityMu.Unlock()
-
-	var retCfg []*v3statuspb.ClientConfig_GenericXdsConfig
+	retCfg := c.topLevelAuthority.dumpResources()
 	for _, a := range c.authorities {
 		retCfg = append(retCfg, a.dumpResources()...)
 	}
diff --git a/xds/internal/xdsclient/clientimpl_loadreport.go b/xds/internal/xdsclient/clientimpl_loadreport.go
index b42e43a56976..efb41b87db53 100644
--- a/xds/internal/xdsclient/clientimpl_loadreport.go
+++ b/xds/internal/xdsclient/clientimpl_loadreport.go
@@ -28,20 +28,14 @@ import (
 // It returns a Store for the user to report loads, a function to cancel the
 // load reporting stream.
 func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) {
-	c.authorityMu.Lock()
-	a, err := c.newAuthorityLocked(server)
+	xc, releaseChannelRef, err := c.getChannelForLRS(server)
 	if err != nil {
-		c.authorityMu.Unlock()
-		c.logger.Warningf("Failed to connect to the management server to report load for authority %q: %v", server, err)
+		c.logger.Warningf("Failed to create a channel to the management server %q to report load: %v", server, err)
 		return nil, func() {}
 	}
-	// Hold the ref before starting load reporting.
- a.refLocked() - c.authorityMu.Unlock() - - store, cancelF := a.reportLoad() - return store, func() { - cancelF() - c.unrefAuthority(a) + load, stopLoadReporting := xc.reportLoad() + return load, func() { + stopLoadReporting() + releaseChannelRef() } } diff --git a/xds/internal/xdsclient/clientimpl_watchers.go b/xds/internal/xdsclient/clientimpl_watchers.go index b9af85db63a8..1b8ebe131957 100644 --- a/xds/internal/xdsclient/clientimpl_watchers.go +++ b/xds/internal/xdsclient/clientimpl_watchers.go @@ -22,6 +22,7 @@ import ( "fmt" "sync" + "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) @@ -48,20 +49,41 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, return func() {} } - // TODO: Make ParseName return an error if parsing fails, and - // schedule the OnError callback in that case. n := xdsresource.ParseName(resourceName) - a, unref, err := c.findAuthority(n) - if err != nil { + a := c.getAuthorityForResource(n) + if a == nil { logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) - c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) + c.serializer.TrySchedule(func(context.Context) { + watcher.OnError(fmt.Errorf("authority %q not found in bootstrap config for resource %q", n.Authority, resourceName), func() {}) + }) return func() {} } - cancelF := a.watchResource(rType, n.String(), watcher) - return func() { - cancelF() - unref() + // The watchResource method on the authority is invoked with n.String() + // instead of resourceName because n.String() canonicalizes the given name. + // So, two resource names which don't differ in the query string, but only + // differ in the order of context params will result in the same resource + // being watched by the authority. + return a.watchResource(rType, n.String(), watcher) +} + +// Gets the authority for the given resource name. +// +// See examples in this section of the gRFC: +// https://github.com/grpc/proposal/blob/master/A47-xds-federation.md#bootstrap-config-changes +func (c *clientImpl) getAuthorityForResource(name *xdsresource.Name) *authority { + // For new-style resource names, always lookup the authorities map. If the + // name does not specify an authority, we will end up looking for an entry + // in the map with the empty string as the key. + if name.Scheme == xdsresource.FederationScheme { + return c.authorities[name.Authority] + } + + // For old-style resource names, we use the top-level authority if the name + // does not specify an authority. 
+ if name.Authority == "" { + return c.topLevelAuthority } + return c.authorities[name.Authority] } // A registry of xdsresource.Type implementations indexed by their corresponding @@ -96,16 +118,29 @@ func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { } func (c *clientImpl) triggerResourceNotFoundForTesting(rType xdsresource.Type, resourceName string) error { - if c == nil || c.done.HasFired() { - return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but client is closed", rType.TypeName(), resourceName) + c.channelsMu.Lock() + defer c.channelsMu.Unlock() + + if c.logger.V(2) { + c.logger.Infof("Triggering resource not found for type: %s, resource name: %s", rType.TypeName(), resourceName) } - n := xdsresource.ParseName(resourceName) - a, unref, err := c.findAuthority(n) - if err != nil { - return fmt.Errorf("attempt to trigger resource-not-found-error for resource %q of type %q, but authority %q is not found", rType.TypeName(), resourceName, n.Authority) + for _, state := range c.xdsActiveChannels { + if err := state.channel.triggerResourceNotFoundForTesting(rType, resourceName); err != nil { + return err + } } - defer unref() - a.triggerResourceNotFoundForTesting(rType, n.String()) return nil } + +func (c *clientImpl) resourceWatchStateForTesting(rType xdsresource.Type, resourceName string) (ads.ResourceWatchState, error) { + c.channelsMu.Lock() + defer c.channelsMu.Unlock() + + for _, state := range c.xdsActiveChannels { + if st, err := state.channel.ads.ResourceWatchStateForTesting(rType, resourceName); err == nil { + return st, nil + } + } + return ads.ResourceWatchState{}, fmt.Errorf("unable to find watch state for resource type %q and name %q", rType.TypeName(), resourceName) +} diff --git a/xds/internal/xdsclient/internal/internal.go b/xds/internal/xdsclient/internal/internal.go index 6301b2b2be47..cdbb86db82a8 100644 --- a/xds/internal/xdsclient/internal/internal.go +++ b/xds/internal/xdsclient/internal/internal.go @@ -30,5 +30,4 @@ var ( // identified by the given resource type and resource name. Returns a // non-nil error if there is no such resource being watched. ResourceWatchStateForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error - ) diff --git a/xds/internal/xdsclient/logging.go b/xds/internal/xdsclient/logging.go index 2269cb293da9..00b6392d6a6c 100644 --- a/xds/internal/xdsclient/logging.go +++ b/xds/internal/xdsclient/logging.go @@ -34,7 +34,3 @@ func prefixLogger(p *clientImpl) *internalgrpclog.PrefixLogger { func clientPrefix(p *clientImpl) string { return fmt.Sprintf("[xds-client %p] ", p) } - -func authorityPrefix(p *clientImpl, serverURI string) string { - return fmt.Sprintf("%s[%s] ", clientPrefix(p), serverURI) -} diff --git a/xds/internal/xdsclient/tests/ads_stream_flow_control_test.go b/xds/internal/xdsclient/tests/ads_stream_flow_control_test.go index d6cc154d0b5c..ff0243f3d462 100644 --- a/xds/internal/xdsclient/tests/ads_stream_flow_control_test.go +++ b/xds/internal/xdsclient/tests/ads_stream_flow_control_test.go @@ -516,7 +516,7 @@ func (s) TestADSFlowControl_ResourceErrors(t *testing.T) { } } -// Test ADS stream flow control with a single resource that deleted from the +// Test ADS stream flow control with a single resource that is deleted from the // management server and therefore the watcher's OnResourceDoesNotExist() // callback is expected to be invoked. Verifies that no further reads are // attempted until the callback is completely handled by the watcher. 
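Aside (editorial, not part of the patch): the watchResource comment in the clientimpl_watchers.go hunk above notes that n.String() canonicalizes xdstp-style resource names, so two names that differ only in the order of their context parameters end up sharing one watch. The standalone sketch below illustrates that property using nothing beyond the Go standard library; the helper name canonicalize and the example resource names are illustrative only and are not part of any gRPC API.

package main

import (
	"fmt"
	"net/url"
)

// canonicalize re-encodes the context parameters (the query string) of an
// xdstp resource name. url.Values.Encode sorts keys, so two names that differ
// only in parameter order produce the same canonical string.
func canonicalize(name string) string {
	u, err := url.Parse(name)
	if err != nil {
		return name // fall back to the raw name if it does not parse
	}
	u.RawQuery = u.Query().Encode()
	return u.String()
}

func main() {
	a := "xdstp://authority.example.com/envoy.config.listener.v3.Listener/srv?a=1&b=2"
	b := "xdstp://authority.example.com/envoy.config.listener.v3.Listener/srv?b=2&a=1"
	fmt.Println(canonicalize(a) == canonicalize(b)) // prints: true
}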
diff --git a/xds/internal/xdsclient/tests/ads_stream_watch_test.go b/xds/internal/xdsclient/tests/ads_stream_watch_test.go new file mode 100644 index 000000000000..285ba99cf5ab --- /dev/null +++ b/xds/internal/xdsclient/tests/ads_stream_watch_test.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + xdsinternal "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/internal" + "google.golang.org/grpc/xds/internal/xdsclient/transport/ads" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" +) + +// Tests the state transitions of the resource specific watch state within the +// ADS stream, specifically when the stream breaks (for both resources that have +// been previously received and for resources that are yet to be received). +func (s) TestADS_WatchState_StreamBreaks(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Create an xDS management server with a restartable listener. + l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("Failed to create a local listener for the xDS management server: %v", err) + } + lis := testutils.NewRestartableListener(l) + mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{Listener: lis}) + + // Create an xDS client with bootstrap pointing to the above server. + nodeID := uuid.New().String() + bc := e2e.DefaultBootstrapContents(t, nodeID, mgmtServer.Address) + testutils.CreateBootstrapFileForTesting(t, bc) + client := createXDSClient(t, bc) + + // Create a watch for the first listener resource and verify that the timer + // is running and the watch state is `requested`. + const listenerName1 = "listener1" + ldsCancel1 := xdsresource.WatchListener(client, listenerName1, noopListenerWatcher{}) + defer ldsCancel1() + if err := waitForResourceWatchState(ctx, client, listenerName1, ads.ResourceWatchStateRequested, true); err != nil { + t.Fatal(err) + } + + // Configure the first resource on the management server. This should result + // in the resource being pushed to the xDS client and should result in the + // timer getting stopped and the watch state moving to `received`. 
+ const routeConfigName = "route-config" + listenerResource1 := e2e.DefaultClientListener(listenerName1, routeConfigName) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource1}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := waitForResourceWatchState(ctx, client, listenerName1, ads.ResourceWatchStateReceived, false); err != nil { + t.Fatal(err) + } + + // Create a watch for the second listener resource and verify that the timer + // is running and the watch state is `requested`. + const listenerName2 = "listener2" + ldsCancel2 := xdsresource.WatchListener(client, listenerName2, noopListenerWatcher{}) + defer ldsCancel2() + if err := waitForResourceWatchState(ctx, client, listenerName2, ads.ResourceWatchStateRequested, true); err != nil { + t.Fatal(err) + } + + // Stop the server to break the ADS stream. Since the first resource was + // already received, this should not change anything for it. But for the + // second resource, it should result in the timer getting stopped and the + // watch state moving to `started`. + lis.Stop() + if err := waitForResourceWatchState(ctx, client, listenerName2, ads.ResourceWatchStateStarted, false); err != nil { + t.Fatal(err) + } + if err := verifyResourceWatchState(client, listenerName1, ads.ResourceWatchStateReceived, false); err != nil { + t.Fatal(err) + } + + // Restart the server and verify that the timer is running and the watch + // state is `requested`, for the second resource. For the first resource, + // nothing should change. + lis.Restart() + if err := waitForResourceWatchState(ctx, client, listenerName2, ads.ResourceWatchStateRequested, true); err != nil { + t.Fatal(err) + } + if err := verifyResourceWatchState(client, listenerName1, ads.ResourceWatchStateReceived, false); err != nil { + t.Fatal(err) + } + + // Configure the second resource on the management server. This should result + // in the resource being pushed to the xDS client and should result in the + // timer getting stopped and the watch state moving to `received`. + listenerResource2 := e2e.DefaultClientListener(listenerName2, routeConfigName) + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource1, listenerResource2}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := waitForResourceWatchState(ctx, client, listenerName2, ads.ResourceWatchStateReceived, false); err != nil { + t.Fatal(err) + } +} + +// Tests the behavior of the xDS client when a resource watch timer expires and +// verifies the resource watch state transitions as expected. +func (s) TestADS_WatchState_TimerFires(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server. + mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{}) + + // Create an xDS client with bootstrap pointing to the above server, and a + // short resource expiry timeout. 
+ nodeID := uuid.New().String() + bc := e2e.DefaultBootstrapContents(t, nodeID, mgmtServer.Address) + testutils.CreateBootstrapFileForTesting(t, bc) + client, close, err := xdsclient.NewForTesting(xdsclient.OptionsForTesting{ + Name: t.Name(), + Contents: bc, + WatchExpiryTimeout: defaultTestWatchExpiryTimeout, + }) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a watch for the first listener resource and verify that the timer + // is running and the watch state is `requested`. + const listenerName = "listener" + ldsCancel1 := xdsresource.WatchListener(client, listenerName, noopListenerWatcher{}) + defer ldsCancel1() + if err := waitForResourceWatchState(ctx, client, listenerName, ads.ResourceWatchStateRequested, true); err != nil { + t.Fatal(err) + } + + // Since the resource is not configured on the management server, the watch + // expiry timer is expected to fire, and the watch state should move to + // `timeout`. + if err := waitForResourceWatchState(ctx, client, listenerName, ads.ResourceWatchStateTimeout, false); err != nil { + t.Fatal(err) + } +} + +func waitForResourceWatchState(ctx context.Context, client xdsclient.XDSClient, resourceName string, wantState ads.WatchState, wantTimer bool) error { + var lastErr error + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + err := verifyResourceWatchState(client, resourceName, wantState, wantTimer) + if err == nil { + break + } + lastErr = err + } + if ctx.Err() != nil { + return fmt.Errorf("timeout when waiting for expected watch state for resource %q: %v", resourceName, lastErr) + } + return nil +} + +func verifyResourceWatchState(client xdsclient.XDSClient, resourceName string, wantState ads.WatchState, wantTimer bool) error { + resourceWatchStateForTesting := internal.ResourceWatchStateForTesting.(func(xdsclient.XDSClient, xdsresource.Type, string) (ads.ResourceWatchState, error)) + listenerResourceType := xdsinternal.ResourceTypeMapForTesting[version.V3ListenerURL].(xdsresource.Type) + gotState, err := resourceWatchStateForTesting(client, listenerResourceType, resourceName) + if err != nil { + return fmt.Errorf("failed to get watch state for resource %q: %v", resourceName, err) + } + if gotState.State != wantState { + return fmt.Errorf("watch state for resource %q is %v, want %v", resourceName, gotState.State, wantState) + } + if (gotState.ExpiryTimer != nil) != wantTimer { + return fmt.Errorf("expiry timer for resource %q is %t, want %t", resourceName, gotState.ExpiryTimer != nil, wantTimer) + } + return nil +} diff --git a/xds/internal/xdsclient/tests/authority_test.go b/xds/internal/xdsclient/tests/authority_test.go index 785d700bc54f..1947daf6dffe 100644 --- a/xds/internal/xdsclient/tests/authority_test.go +++ b/xds/internal/xdsclient/tests/authority_test.go @@ -102,10 +102,10 @@ func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time. t.Fatalf("Failed to create bootstrap configuration: %v", err) } client, close, err := xdsclient.NewForTesting(xdsclient.OptionsForTesting{ - Name: t.Name(), - Contents: bootstrapContents, - WatchExpiryTimeout: defaultTestWatchExpiryTimeout, - AuthorityIdleTimeout: idleTimeout, + Name: t.Name(), + Contents: bootstrapContents, + WatchExpiryTimeout: defaultTestWatchExpiryTimeout, + IdleChannelExpiryTimeout: idleTimeout, }) if err != nil { t.Fatalf("Failed to create an xDS client: %v", err) @@ -127,14 +127,14 @@ func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time. 
return lisDefault, lisNonDefault, client, close } -// TestAuthorityShare tests the authority sharing logic. The test verifies the +// Tests the xdsChannel sharing logic among authorities. The test verifies the // following scenarios: // - A watch for a resource name with an authority matching an existing watch // should not result in a new transport being created. // - A watch for a resource name with different authority name but same // authority config as an existing watch should not result in a new transport // being created. -func (s) TestAuthorityShare(t *testing.T) { +func (s) TestAuthority_XDSChannelSharing(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() lis, _, client, close := setupForAuthorityTests(ctx, t, time.Duration(0)) @@ -176,14 +176,13 @@ func (s) TestAuthorityShare(t *testing.T) { } } -// TestAuthorityIdle test the authority idle timeout logic. The test verifies -// that the xDS client does not close authorities immediately after the last -// watch is canceled, but waits for the configured idle timeout to expire before -// closing them. -func (s) TestAuthorityIdleTimeout(t *testing.T) { +// Test the xdsChannel idle timeout logic. The test verifies that the xDS client +// does not close xdsChannels immediately after the last watch is canceled, but +// waits for the configured idle timeout to expire before closing them. +func (s) TestAuthority_XDSChannelIdleTimeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - lis, _, client, close := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + lis, _, client, close := setupForAuthorityTests(ctx, t, defaultTestIdleChannelExpiryTimeout) defer close() // Request the first resource. Verify that a new transport is created. @@ -213,21 +212,20 @@ func (s) TestAuthorityIdleTimeout(t *testing.T) { t.Fatal("Connection to management server closed unexpectedly") } - // Wait for the authority idle timeout to fire. - time.Sleep(2 * defaultTestIdleAuthorityTimeout) - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if _, err := conn.CloseCh.Receive(sCtx); err != nil { + // Ensure the transport is closed once the idle timeout fires. + select { + case <-conn.CloseCh.C: + case <-time.After(2 * defaultTestIdleChannelExpiryTimeout): t.Fatal("Connection to management server not closed after idle timeout expiry") } } -// TestAuthorityClientClose verifies that authorities in use and in the idle -// cache are all closed when the client is closed. -func (s) TestAuthorityClientClose(t *testing.T) { - // Set the authority idle timeout to twice the defaultTestTimeout. This will - // ensure that idle authorities stay in the cache for the duration of this - // test, until explicitly closed. +// Tests that xdsChannels in use and in the idle cache are all closed when the +// xDS client is closed. +func (s) TestAuthority_XDSChannelCloseOnClientClose(t *testing.T) { + // Set the idle timeout to twice the defaultTestTimeout. This will ensure + // that idle channels stay in the cache for the duration of this test, until + // explicitly closed. 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() lisDefault, lisNonDefault, client, close := setupForAuthorityTests(ctx, t, time.Duration(2*defaultTestTimeout)) @@ -273,12 +271,12 @@ func (s) TestAuthorityClientClose(t *testing.T) { } } -// TestAuthorityRevive verifies that an authority in the idle cache is revived -// when a new watch is started on this authority. -func (s) TestAuthorityRevive(t *testing.T) { +// Tests that an xdsChannel in the idle cache is revived when a new watch is +// started on an authority. +func (s) TestAuthority_XDSChannelRevive(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - lis, _, client, close := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + lis, _, client, close := setupForAuthorityTests(ctx, t, defaultTestIdleChannelExpiryTimeout) defer close() // Request the first resource. Verify that a new transport is created. @@ -305,7 +303,7 @@ func (s) TestAuthorityRevive(t *testing.T) { // Wait for double the idle timeout, and the connection to the management // server should not be closed, since it was revived from the idle cache. - time.Sleep(2 * defaultTestIdleAuthorityTimeout) + time.Sleep(2 * defaultTestIdleChannelExpiryTimeout) sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() if _, err := conn.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { diff --git a/xds/internal/xdsclient/tests/helpers_test.go b/xds/internal/xdsclient/tests/helpers_test.go index 0d6f222f1328..5c4175b04df0 100644 --- a/xds/internal/xdsclient/tests/helpers_test.go +++ b/xds/internal/xdsclient/tests/helpers_test.go @@ -36,10 +36,10 @@ func Test(t *testing.T) { } const ( - defaultTestWatchExpiryTimeout = 500 * time.Millisecond - defaultTestIdleAuthorityTimeout = 50 * time.Millisecond - defaultTestTimeout = 10 * time.Second - defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. + defaultTestWatchExpiryTimeout = 500 * time.Millisecond + defaultTestIdleChannelExpiryTimeout = 50 * time.Millisecond + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. ldsName = "xdsclient-test-lds-resource" rdsName = "xdsclient-test-rds-resource" diff --git a/xds/internal/xdsclient/transport/internal/internal.go b/xds/internal/xdsclient/transport/internal/internal.go deleted file mode 100644 index 9acc33cbbf8d..000000000000 --- a/xds/internal/xdsclient/transport/internal/internal.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package internal contains functionality internal to the transport package. -package internal - -// The following vars can be overridden by tests. -var ( - // GRPCNewClient creates a new gRPC Client. 
- GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error) -) diff --git a/xds/internal/xdsclient/transport/loadreport.go b/xds/internal/xdsclient/transport/loadreport.go deleted file mode 100644 index e47fdd9846ba..000000000000 --- a/xds/internal/xdsclient/transport/loadreport.go +++ /dev/null @@ -1,259 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/xdsclient/load" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/durationpb" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" - v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" -) - -type lrsStream = v3lrsgrpc.LoadReportingService_StreamLoadStatsClient - -// ReportLoad starts reporting loads to the management server the transport is -// configured to use. -// -// It returns a Store for the user to report loads and a function to cancel the -// load reporting. -func (t *Transport) ReportLoad() (*load.Store, func()) { - t.lrsStartStream() - return t.lrsStore, grpcsync.OnceFunc(func() { t.lrsStopStream() }) -} - -// lrsStartStream starts an LRS stream to the server, if none exists. -func (t *Transport) lrsStartStream() { - t.lrsMu.Lock() - defer t.lrsMu.Unlock() - - t.lrsRefCount++ - if t.lrsRefCount != 1 { - // Return early if the stream has already been started. - return - } - - ctx, cancel := context.WithCancel(context.Background()) - t.lrsCancelStream = cancel - - // Create a new done channel everytime a new stream is created. This ensures - // that we don't close the same channel multiple times (from lrsRunner() - // goroutine) when multiple streams are created and closed. - t.lrsRunnerDoneCh = make(chan struct{}) - go t.lrsRunner(ctx) -} - -// lrsStopStream closes the LRS stream, if this is the last user of the stream. -func (t *Transport) lrsStopStream() { - t.lrsMu.Lock() - defer t.lrsMu.Unlock() - - t.lrsRefCount-- - if t.lrsRefCount != 0 { - // Return early if the stream has other references. - return - } - - t.lrsCancelStream() - t.logger.Infof("Stopping LRS stream") - - // Wait for the runner goroutine to exit. The done channel will be - // recreated when a new stream is created. - <-t.lrsRunnerDoneCh -} - -// lrsRunner starts an LRS stream to report load data to the management server. -// It reports load at constant intervals (as configured by the management -// server) until the context is cancelled. 
-func (t *Transport) lrsRunner(ctx context.Context) { - defer close(t.lrsRunnerDoneCh) - - // This feature indicates that the client supports the - // LoadStatsResponse.send_all_clusters field in the LRS response. - node := proto.Clone(t.nodeProto).(*v3corepb.Node) - node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters") - - runLoadReportStream := func() error { - // streamCtx is created and canceled in case we terminate the stream - // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring - // goroutine. - streamCtx, cancel := context.WithCancel(ctx) - defer cancel() - stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) - if err != nil { - t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) - return nil - } - t.logger.Infof("Created LRS stream to server %q", t.serverURI) - - if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { - t.logger.Warningf("Sending first LRS request failed: %v", err) - return nil - } - - clusters, interval, err := t.recvFirstLoadStatsResponse(stream) - if err != nil { - t.logger.Warningf("Reading from LRS stream failed: %v", err) - return nil - } - - // We reset backoff state when we successfully receive at least one - // message from the server. - t.sendLoads(streamCtx, stream, clusters, interval) - return backoff.ErrResetBackoff - } - backoff.RunF(ctx, runLoadReportStream, t.backoff) -} - -func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) { - tick := time.NewTicker(interval) - defer tick.Stop() - for { - select { - case <-tick.C: - case <-ctx.Done(): - return - } - if err := t.sendLoadStatsRequest(stream, t.lrsStore.Stats(clusterNames)); err != nil { - t.logger.Warningf("Writing to LRS stream failed: %v", err) - return - } - } -} - -func (t *Transport) sendFirstLoadStatsRequest(stream lrsStream, node *v3corepb.Node) error { - req := &v3lrspb.LoadStatsRequest{Node: node} - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) - } - err := stream.Send(req) - if err == io.EOF { - return getStreamError(stream) - } - return err -} - -func (t *Transport) recvFirstLoadStatsResponse(stream lrsStream) ([]string, time.Duration, error) { - resp, err := stream.Recv() - if err != nil { - return nil, 0, fmt.Errorf("failed to receive first LoadStatsResponse: %v", err) - } - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("Received first LoadStatsResponse: %s", pretty.ToJSON(resp)) - } - - rInterval := resp.GetLoadReportingInterval() - if rInterval.CheckValid() != nil { - return nil, 0, fmt.Errorf("invalid load_reporting_interval: %v", err) - } - interval := rInterval.AsDuration() - - if resp.ReportEndpointGranularity { - // TODO(easwars): Support per endpoint loads. - return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation") - } - - clusters := resp.Clusters - if resp.SendAllClusters { - // Return nil to send stats for all clusters. 
- clusters = nil - } - - return clusters, interval, nil -} - -func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) error { - clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) - for _, sd := range loads { - droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) - for category, count := range sd.Drops { - droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ - Category: category, - DroppedCount: count, - }) - } - localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) - for l, localityData := range sd.LocalityStats { - lid, err := internal.LocalityIDFromString(l) - if err != nil { - return err - } - loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) - for name, loadData := range localityData.LoadStats { - loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ - MetricName: name, - NumRequestsFinishedWithMetric: loadData.Count, - TotalMetricValue: loadData.Sum, - }) - } - localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ - Locality: &v3corepb.Locality{ - Region: lid.Region, - Zone: lid.Zone, - SubZone: lid.SubZone, - }, - TotalSuccessfulRequests: localityData.RequestStats.Succeeded, - TotalRequestsInProgress: localityData.RequestStats.InProgress, - TotalErrorRequests: localityData.RequestStats.Errored, - TotalIssuedRequests: localityData.RequestStats.Issued, - LoadMetricStats: loadMetricStats, - UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. - }) - } - - clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ - ClusterName: sd.Cluster, - ClusterServiceName: sd.Service, - UpstreamLocalityStats: localityStats, - TotalDroppedRequests: sd.TotalDrops, - DroppedRequests: droppedReqs, - LoadReportInterval: durationpb.New(sd.ReportInterval), - }) - } - - req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("Sending LRS loads: %s", pretty.ToJSON(req)) - } - err := stream.Send(req) - if err == io.EOF { - return getStreamError(stream) - } - return err -} - -func getStreamError(stream lrsStream) error { - for { - if _, err := stream.Recv(); err != nil { - return err - } - } -} diff --git a/xds/internal/xdsclient/transport/lrs/lrs_stream.go b/xds/internal/xdsclient/transport/lrs/lrs_stream.go index 36e70bc7170f..f9140b2e3c4b 100644 --- a/xds/internal/xdsclient/transport/lrs/lrs_stream.go +++ b/xds/internal/xdsclient/transport/lrs/lrs_stream.go @@ -56,7 +56,7 @@ type StreamImpl struct { // The following fields are initialized when a Stream instance is created // and are read-only afterwards, and hence can be accessed without a mutex. transport transport.Interface // Transport to use for LRS stream. - backoff func(int) time.Duration // Backoff for retries after stream failures. + backoff func(int) time.Duration // Backoff for retries, after stream failures. nodeProto *v3corepb.Node // Identifies the gRPC application. doneCh chan struct{} // To notify exit of LRS goroutine. logger *igrpclog.PrefixLogger @@ -71,7 +71,7 @@ type StreamImpl struct { // StreamOpts holds the options for creating an lrsStream. type StreamOpts struct { Transport transport.Interface // xDS transport to create the stream on. - Backoff func(int) time.Duration // Backoff for retries after stream failures. + Backoff func(int) time.Duration // Backoff for retries, after stream failures. 
NodeProto *v3corepb.Node // Node proto to identify the gRPC application. LogPrefix string // Prefix to be used for log messages. } @@ -119,6 +119,13 @@ func (lrs *StreamImpl) ReportLoad() (*load.Store, func()) { if lrs.refCount != 0 { return } + + if lrs.cancelStream == nil { + // It is possible that Stop() is called before the cleanup function + // is called, thereby setting cancelStream to nil. Hence we need a + // nil check here bofore invoking the cancel function. + return + } lrs.cancelStream() lrs.cancelStream = nil lrs.logger.Infof("Stopping StreamImpl") diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go deleted file mode 100644 index 59b221727a1f..000000000000 --- a/xds/internal/xdsclient/transport/transport.go +++ /dev/null @@ -1,702 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package transport implements the xDS transport protocol functionality -// required by the xdsclient. -package transport - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/keepalive" - xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" - "google.golang.org/grpc/xds/internal/xdsclient/load" - transportinternal "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - "google.golang.org/protobuf/types/known/anypb" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - statuspb "google.golang.org/genproto/googleapis/rpc/status" -) - -type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient - -func init() { - transportinternal.GRPCNewClient = grpc.NewClient - xdsclientinternal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { - return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) - } -} - -// Any per-RPC level logs which print complete request or response messages -// should be gated at this verbosity level. Other per-RPC level logs which print -// terse output should be at `INFO` and verbosity 2. -const perRPCVerbosityLevel = 9 - -// Transport provides a resource-type agnostic implementation of the xDS -// transport protocol. At this layer, resource contents are supposed to be -// opaque blobs which should be meaningful only to the xDS data model layer -// which is implemented by the `xdsresource` package. 
-// -// Under the hood, it owns the gRPC connection to a single management server and -// manages the lifecycle of ADS/LRS streams. It uses the xDS v3 transport -// protocol version. -type Transport struct { - // These fields are initialized at creation time and are read-only afterwards. - cc *grpc.ClientConn // ClientConn to the management server. - serverURI string // URI of the management server. - onRecvHandler OnRecvHandlerFunc // Resource update handler. xDS data model layer. - onErrorHandler func(error) // To report underlying stream errors. - onSendHandler OnSendHandlerFunc // To report resources requested on ADS stream. - lrsStore *load.Store // Store returned to user for pushing loads. - backoff func(int) time.Duration // Backoff after stream failures. - nodeProto *v3corepb.Node // Identifies the gRPC application. - logger *grpclog.PrefixLogger // Prefix logger for transport logs. - adsRunnerCancel context.CancelFunc // CancelFunc for the ADS goroutine. - adsRunnerDoneCh chan struct{} // To notify exit of ADS goroutine. - lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. - - // These channels enable synchronization amongst the different goroutines - // spawned by the transport, and between asynchronous events resulting from - // receipt of responses from the management server. - adsStreamCh chan adsStream // New ADS streams are pushed here. - adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. - - // mu guards the following runtime state maintained by the transport. - mu sync.Mutex - // resources is map from resource type URL to the set of resource names - // being requested for that type. When the ADS stream is restarted, the - // transport requests all these resources again from the management server. - resources map[string]map[string]bool - // versions is a map from resource type URL to the most recently ACKed - // version for that resource. Resource versions are a property of the - // resource type and not the stream, and will not be reset upon stream - // restarts. - versions map[string]string - // nonces is a map from resource type URL to the most recently received - // nonce for that resource type. Nonces are a property of the ADS stream and - // will be reset upon stream restarts. - nonces map[string]string - - lrsMu sync.Mutex // Protects all LRS state. - lrsCancelStream context.CancelFunc // CancelFunc for the LRS stream. - lrsRefCount int // Reference count on the load store. -} - -// OnRecvHandlerFunc is the implementation at the xDS data model layer, which -// determines if the configuration received from the management server can be -// applied locally or not. -// -// A nil error is returned from this function when the data model layer believes -// that the received configuration is good and can be applied locally. This will -// cause the transport layer to send an ACK to the management server. A non-nil -// error is returned from this function when the data model layer believes -// otherwise, and this will cause the transport layer to send a NACK. -// -// The implementation is expected to invoke onDone when local processing of the -// update is complete, i.e. it is consumed by all watchers. -type OnRecvHandlerFunc func(update ResourceUpdate, onDone func()) error - -// OnSendHandlerFunc is the implementation at the authority, which handles state -// changes for the resource watch and stop watch timers accordingly. 
-type OnSendHandlerFunc func(update *ResourceSendInfo) - -// ResourceUpdate is a representation of the configuration update received from -// the management server. It only contains fields which are useful to the data -// model layer, and layers above it. -type ResourceUpdate struct { - // Resources is the list of resources received from the management server. - Resources []*anypb.Any - // URL is the resource type URL for the above resources. - URL string - // Version is the resource version, for the above resources, as specified by - // the management server. - Version string -} - -// Options specifies configuration knobs used when creating a new Transport. -type Options struct { - // ServerCfg contains all the configuration required to connect to the xDS - // management server. - ServerCfg *bootstrap.ServerConfig - // OnRecvHandler is the component which makes ACK/NACK decisions based on - // the received resources. - // - // Invoked inline and implementations must not block. - OnRecvHandler OnRecvHandlerFunc - // OnErrorHandler provides a way for the transport layer to report - // underlying stream errors. These can be bubbled all the way up to the user - // of the xdsClient. - // - // Invoked inline and implementations must not block. - OnErrorHandler func(error) - // OnSendHandler provides a way for the transport layer to report underlying - // resource requests sent on the stream. However, Send() on the ADS stream will - // return successfully as long as: - // 1. there is enough flow control quota to send the message. - // 2. the message is added to the send buffer. - // However, the connection may fail after the callback is invoked and before - // the message is actually sent on the wire. This is accepted. - // - // Invoked inline and implementations must not block. - OnSendHandler func(*ResourceSendInfo) - // Backoff controls the amount of time to backoff before recreating failed - // ADS streams. If unspecified, a default exponential backoff implementation - // is used. For more details, see: - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. - Backoff func(retries int) time.Duration - // Logger does logging with a prefix. - Logger *grpclog.PrefixLogger - // NodeProto contains the Node proto to be used in xDS requests. This will be - // of type *v3corepb.Node. - NodeProto *v3corepb.Node -} - -// New creates a new Transport. -func New(opts Options) (*Transport, error) { - switch { - case opts.OnRecvHandler == nil: - return nil, errors.New("missing OnRecv callback handler when creating a new transport") - case opts.OnErrorHandler == nil: - return nil, errors.New("missing OnError callback handler when creating a new transport") - case opts.OnSendHandler == nil: - return nil, errors.New("missing OnSend callback handler when creating a new transport") - } - - // Dial the xDS management server with dial options specified by the server - // configuration and a static keepalive configuration that is common across - // gRPC language implementations. - kpCfg := grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 5 * time.Minute, - Timeout: 20 * time.Second, - }) - dopts := append([]grpc.DialOption{kpCfg}, opts.ServerCfg.DialOptions()...) - grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) - cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) - if err != nil { - // An error from a non-blocking dial indicates something serious. 
- return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI(), err) - } - cc.Connect() - - boff := opts.Backoff - if boff == nil { - boff = backoff.DefaultExponential.Backoff - } - ret := &Transport{ - cc: cc, - serverURI: opts.ServerCfg.ServerURI(), - onRecvHandler: opts.OnRecvHandler, - onErrorHandler: opts.OnErrorHandler, - onSendHandler: opts.OnSendHandler, - lrsStore: load.NewStore(), - backoff: boff, - nodeProto: opts.NodeProto, - logger: opts.Logger, - - adsStreamCh: make(chan adsStream, 1), - adsRequestCh: buffer.NewUnbounded(), - resources: make(map[string]map[string]bool), - versions: make(map[string]string), - nonces: make(map[string]string), - adsRunnerDoneCh: make(chan struct{}), - } - - // This context is used for sending and receiving RPC requests and - // responses. It is also used by all the goroutines spawned by this - // Transport. Therefore, cancelling this context when the transport is - // closed will essentially cancel any pending RPCs, and cause the goroutines - // to terminate. - ctx, cancel := context.WithCancel(context.Background()) - ret.adsRunnerCancel = cancel - go ret.adsRunner(ctx) - - ret.logger.Infof("Created transport to server %q", ret.serverURI) - return ret, nil -} - -// resourceRequest wraps the resource type url and the resource names requested -// by the user of this transport. -type resourceRequest struct { - resources []string - url string -} - -// SendRequest sends out an ADS request for the provided resources of the -// specified resource type. -// -// The request is sent out asynchronously. If no valid stream exists at the time -// of processing this request, it is queued and will be sent out once a valid -// stream exists. -// -// If a successful response is received, the update handler callback provided at -// creation time is invoked. If an error is encountered, the stream error -// handler callback provided at creation time is invoked. -func (t *Transport) SendRequest(url string, resources []string) { - t.adsRequestCh.Put(&resourceRequest{ - url: url, - resources: resources, - }) -} - -// ResourceSendInfo wraps the names and url of resources sent to the management -// server. This is used by the `authority` type to start/stop the watch timer -// associated with every resource in the update. 
-type ResourceSendInfo struct { - ResourceNames []string - URL string -} - -func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, sendNodeProto bool, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { - req := &v3discoverypb.DiscoveryRequest{ - TypeUrl: resourceURL, - ResourceNames: resourceNames, - VersionInfo: version, - ResponseNonce: nonce, - } - if sendNodeProto { - req.Node = t.nodeProto - } - if nackErr != nil { - req.ErrorDetail = &statuspb.Status{ - Code: int32(codes.InvalidArgument), Message: nackErr.Error(), - } - } - if err := stream.Send(req); err != nil { - return err - } - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) - } else { - if t.logger.V(2) { - t.logger.Infof("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) - } - } - t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) - return nil -} - -func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (resources []*anypb.Any, resourceURL, version, nonce string, err error) { - resp, err := stream.Recv() - if err != nil { - return nil, "", "", "", err - } - if t.logger.V(perRPCVerbosityLevel) { - t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) - } else if t.logger.V(2) { - t.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) - } - return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil -} - -// adsRunner starts an ADS stream (and backs off exponentially, if the previous -// stream failed without receiving a single reply) and runs the sender and -// receiver routines to send and receive data from the stream respectively. -func (t *Transport) adsRunner(ctx context.Context) { - defer close(t.adsRunnerDoneCh) - - go t.send(ctx) - - // We reset backoff state when we successfully receive at least one - // message from the server. - runStreamWithBackoff := func() error { - newStream := xdsclientinternal.NewADSStream.(func(context.Context, *grpc.ClientConn) (adsStream, error)) - stream, err := newStream(ctx, t.cc) - if err != nil { - t.onErrorHandler(err) - t.logger.Warningf("Creating new ADS stream failed: %v", err) - return nil - } - t.logger.Infof("ADS stream created") - - select { - case <-t.adsStreamCh: - default: - } - t.adsStreamCh <- stream - msgReceived := t.recv(ctx, stream) - if msgReceived { - return backoff.ErrResetBackoff - } - return nil - } - backoff.RunF(ctx, runStreamWithBackoff, t.backoff) -} - -// send is a separate goroutine for sending resource requests on the ADS stream. -// -// For every new stream received on the stream channel, all existing resources -// are re-requested from the management server. -// -// For every new resource request received on the resources channel, the -// resources map is updated (this ensures that resend will pick them up when -// there are new streams) and the appropriate request is sent out. -func (t *Transport) send(ctx context.Context) { - var stream adsStream - // The xDS protocol only requires that we send the node proto in the first - // discovery request on every stream. Sending the node proto in every - // request message wastes CPU resources on the client and the server. 
- sentNodeProto := false - for { - select { - case <-ctx.Done(): - return - case stream = <-t.adsStreamCh: - // We have a new stream and we've to ensure that the node proto gets - // sent out in the first request on the stream. - var err error - if sentNodeProto, err = t.sendExisting(stream); err != nil { - // Send failed, clear the current stream. Attempt to resend will - // only be made after a new stream is created. - stream = nil - continue - } - case u, ok := <-t.adsRequestCh.Get(): - if !ok { - // No requests will be sent after the adsRequestCh buffer is closed. - return - } - t.adsRequestCh.Load() - - var ( - resources []string - url, version, nonce string - send bool - nackErr error - ) - switch update := u.(type) { - case *resourceRequest: - resources, url, version, nonce = t.processResourceRequest(update) - case *ackRequest: - resources, url, version, nonce, send = t.processAckRequest(update, stream) - if !send { - continue - } - nackErr = update.nackErr - } - if stream == nil { - // There's no stream yet. Skip the request. This request - // will be resent to the new streams. If no stream is - // created, the watcher will timeout (same as server not - // sending response back). - continue - } - if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, resources, url, version, nonce, nackErr); err != nil { - t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, version, nonce, err) - // Send failed, clear the current stream. - stream = nil - } - sentNodeProto = true - } - } -} - -// sendExisting sends out xDS requests for existing resources when recovering -// from a broken stream. -// -// We call stream.Send() here with the lock being held. It should be OK to do -// that here because the stream has just started and Send() usually returns -// quickly (once it pushes the message onto the transport layer) and is only -// ever blocked if we don't have enough flow control quota. -// -// Returns true if the node proto was sent. -func (t *Transport) sendExisting(stream adsStream) (sentNodeProto bool, err error) { - t.mu.Lock() - defer t.mu.Unlock() - - // Reset only the nonces map when the stream restarts. - // - // xDS spec says the following. See section: - // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version - // - // Note that the version for a resource type is not a property of an - // individual xDS stream but rather a property of the resources themselves. If - // the stream becomes broken and the client creates a new stream, the client’s - // initial request on the new stream should indicate the most recent version - // seen by the client on the previous stream - t.nonces = make(map[string]string) - - // Send node proto only in the first request on the stream. - for url, resources := range t.resources { - if len(resources) == 0 { - continue - } - if err := t.sendAggregatedDiscoveryServiceRequest(stream, !sentNodeProto, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { - t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, t.versions[url], "", err) - return false, err - } - sentNodeProto = true - } - - return sentNodeProto, nil -} - -// recv receives xDS responses on the provided ADS stream and branches out to -// message specific handlers. Returns true if at least one message was -// successfully received. 
-func (t *Transport) recv(ctx context.Context, stream adsStream) bool { - // Initialize the flow control quota for the stream. This helps to block the - // next read until the previous one is consumed by all watchers. - fc := newADSFlowControl() - - msgReceived := false - for { - // Wait for ADS stream level flow control to be available. - if !fc.wait(ctx) { - if t.logger.V(2) { - t.logger.Infof("ADS stream context canceled") - } - return msgReceived - } - - resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) - if err != nil { - // Note that we do not consider it an error if the ADS stream was closed - // after having received a response on the stream. This is because there - // are legitimate reasons why the server may need to close the stream during - // normal operations, such as needing to rebalance load or the underlying - // connection hitting its max connection age limit. - // (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)). - if msgReceived { - err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error()) - } - t.onErrorHandler(err) - t.logger.Warningf("ADS stream closed: %v", err) - return msgReceived - } - msgReceived = true - - u := ResourceUpdate{ - Resources: resources, - URL: url, - Version: rVersion, - } - fc.setPending() - if err = t.onRecvHandler(u, fc.onDone); xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { - t.logger.Warningf("%v", err) - continue - } - // If the data model layer returned an error, we need to NACK the - // response in which case we need to set the version to the most - // recently accepted version of this resource type. - if err != nil { - t.mu.Lock() - t.adsRequestCh.Put(&ackRequest{ - url: url, - nonce: nonce, - stream: stream, - version: t.versions[url], - nackErr: err, - }) - t.mu.Unlock() - t.logger.Warningf("Sending NACK for resource type: %q, version: %q, nonce: %q, reason: %v", url, rVersion, nonce, err) - continue - } - t.adsRequestCh.Put(&ackRequest{ - url: url, - nonce: nonce, - stream: stream, - version: rVersion, - }) - if t.logger.V(2) { - t.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) - } - } -} - -func mapToSlice(m map[string]bool) []string { - ret := make([]string, 0, len(m)) - for i := range m { - ret = append(ret, i) - } - return ret -} - -func sliceToMap(ss []string) map[string]bool { - ret := make(map[string]bool, len(ss)) - for _, s := range ss { - ret[s] = true - } - return ret -} - -// processResourceRequest pulls the fields needed to send out an ADS request. -// The resource type and the list of resources to request are provided by the -// user, while the version and nonce are maintained internally. -// -// The resources map, which keeps track of the resources being requested, is -// updated here. Any subsequent stream failure will re-request resources stored -// in this map. -// -// Returns the list of resources, resource type url, version and nonce. -func (t *Transport) processResourceRequest(req *resourceRequest) ([]string, string, string, string) { - t.mu.Lock() - defer t.mu.Unlock() - - resources := sliceToMap(req.resources) - t.resources[req.url] = resources - return req.resources, req.url, t.versions[req.url], t.nonces[req.url] -} - -type ackRequest struct { - url string // Resource type URL. - version string // NACK if version is an empty string. - nonce string - nackErr error // nil for ACK, non-nil for NACK. 
- // ACK/NACK are tagged with the stream it's for. When the stream is down, - // all the ACK/NACK for this stream will be dropped, and the version/nonce - // won't be updated. - stream grpc.ClientStream -} - -// processAckRequest pulls the fields needed to send out an ADS ACK. The nonces -// and versions map is updated. -// -// Returns the list of resources, resource type url, version, nonce, and an -// indication of whether an ACK should be sent on the wire or not. -func (t *Transport) processAckRequest(ack *ackRequest, stream grpc.ClientStream) ([]string, string, string, string, bool) { - if ack.stream != stream { - // If ACK's stream isn't the current sending stream, this means the ACK - // was pushed to queue before the old stream broke, and a new stream has - // been started since. Return immediately here so we don't update the - // nonce for the new stream. - return nil, "", "", "", false - } - - t.mu.Lock() - defer t.mu.Unlock() - - // Update the nonce irrespective of whether we send the ACK request on wire. - // An up-to-date nonce is required for the next request. - nonce := ack.nonce - t.nonces[ack.url] = nonce - - s, ok := t.resources[ack.url] - if !ok || len(s) == 0 { - // We don't send the ACK request if there are no resources of this type - // in our resources map. This can be either when the server sends - // responses before any request, or the resources are removed while the - // ackRequest was in queue). If we send a request with an empty - // resource name list, the server may treat it as a wild card and send - // us everything. - return nil, "", "", "", false - } - resources := mapToSlice(s) - - // Update the versions map only when we plan to send an ACK. - if ack.nackErr == nil { - t.versions[ack.url] = ack.version - } - - return resources, ack.url, ack.version, nonce, true -} - -// Close closes the Transport and frees any associated resources. -func (t *Transport) Close() { - t.adsRunnerCancel() - <-t.adsRunnerDoneCh - t.adsRequestCh.Close() - t.cc.Close() -} - -// ChannelConnectivityStateForTesting returns the connectivity state of the gRPC -// channel to the management server. -// -// Only for testing purposes. -func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State { - return t.cc.GetState() -} - -// adsFlowControl implements ADS stream level flow control that enables the -// transport to block the reading of the next message off of the stream until -// the previous update is consumed by all watchers. -// -// The lifetime of the flow control is tied to the lifetime of the stream. -type adsFlowControl struct { - logger *grpclog.PrefixLogger - - // Whether the most recent update is pending consumption by all watchers. - pending atomic.Bool - // Channel used to notify when all the watchers have consumed the most - // recent update. Wait() blocks on reading a value from this channel. - readyCh chan struct{} -} - -// newADSFlowControl returns a new adsFlowControl. -func newADSFlowControl() *adsFlowControl { - return &adsFlowControl{readyCh: make(chan struct{}, 1)} -} - -// setPending changes the internal state to indicate that there is an update -// pending consumption by all watchers. -func (fc *adsFlowControl) setPending() { - fc.pending.Store(true) -} - -// wait blocks until all the watchers have consumed the most recent update and -// returns true. If the context expires before that, it returns false. -func (fc *adsFlowControl) wait(ctx context.Context) bool { - // If there is no pending update, there is no need to block. 
- if !fc.pending.Load() { - // If all watchers finished processing the most recent update before the - // `recv` goroutine made the next call to `Wait()`, there would be an - // entry in the readyCh channel that needs to be drained to ensure that - // the next call to `Wait()` doesn't unblock before it actually should. - select { - case <-fc.readyCh: - default: - } - return true - } - - select { - case <-ctx.Done(): - return false - case <-fc.readyCh: - return true - } -} - -// onDone indicates that all watchers have consumed the most recent update. -func (fc *adsFlowControl) onDone() { - fc.pending.Store(false) - - select { - // Writes to the readyCh channel should not block ideally. The default - // branch here is to appease the paranoid mind. - case fc.readyCh <- struct{}{}: - default: - if fc.logger.V(2) { - fc.logger.Infof("ADS stream flow control readyCh is full") - } - } -} diff --git a/xds/internal/xdsclient/transport/transport_new_test.go b/xds/internal/xdsclient/transport/transport_new_test.go deleted file mode 100644 index 3bd5ac9d2486..000000000000 --- a/xds/internal/xdsclient/transport/transport_new_test.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport_test - -import ( - "strings" - "testing" - - "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/transport" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" -) - -// TestNew covers that New() returns an error if the input *ServerConfig -// contains invalid content. -func (s) TestNew(t *testing.T) { - serverCfg, err := bootstrap.ServerConfigForTesting(bootstrap.ServerConfigTestingOptions{URI: "server-address"}) - if err != nil { - t.Fatalf("Failed to create server config for testing: %v", err) - } - - tests := []struct { - name string - opts transport.Options - wantErrStr string - }{ - { - name: "missing onRecv handler", - opts: transport.Options{ - ServerCfg: serverCfg, - NodeProto: &v3corepb.Node{}, - }, - wantErrStr: "missing OnRecv callback handler when creating a new transport", - }, - { - name: "missing onError handler", - opts: transport.Options{ - ServerCfg: serverCfg, - NodeProto: &v3corepb.Node{}, - OnRecvHandler: noopRecvHandler, // No data model layer validation. - OnSendHandler: func(*transport.ResourceSendInfo) {}, - }, - wantErrStr: "missing OnError callback handler when creating a new transport", - }, - - { - name: "missing onSend handler", - opts: transport.Options{ - ServerCfg: serverCfg, - NodeProto: &v3corepb.Node{}, - OnRecvHandler: noopRecvHandler, // No data model layer validation. - OnErrorHandler: func(error) {}, - }, - wantErrStr: "missing OnSend callback handler when creating a new transport", - }, - { - name: "happy case", - opts: transport.Options{ - ServerCfg: serverCfg, - NodeProto: &v3corepb.Node{}, - OnRecvHandler: noopRecvHandler, // No data model layer validation. 
- OnErrorHandler: func(error) {}, - OnSendHandler: func(*transport.ResourceSendInfo) {}, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - c, err := transport.New(test.opts) - defer func() { - if c != nil { - c.Close() - } - }() - if (err != nil) != (test.wantErrStr != "") { - t.Fatalf("New(%+v) = %v, wantErr: %v", test.opts, err, test.wantErrStr) - } - if err != nil && !strings.Contains(err.Error(), test.wantErrStr) { - t.Fatalf("New(%+v) = %v, wantErr: %v", test.opts, err, test.wantErrStr) - } - }) - } -} diff --git a/xds/internal/xdsclient/transport/transport_test.go b/xds/internal/xdsclient/transport/transport_test.go deleted file mode 100644 index b51f58b742f5..000000000000 --- a/xds/internal/xdsclient/transport/transport_test.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package transport_test - -import ( - "testing" - - "google.golang.org/grpc" - "google.golang.org/grpc/internal/grpctest" - internalbootstrap "google.golang.org/grpc/internal/xds/bootstrap" - "google.golang.org/grpc/xds/internal/xdsclient/transport" - "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -var noopRecvHandler = func(_ transport.ResourceUpdate, onDone func()) error { - onDone() - return nil -} - -func (s) TestNewWithGRPCDial(t *testing.T) { - // Override the dialer with a custom one. - customDialerCalled := false - customDialer := func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - customDialerCalled = true - return grpc.NewClient(target, opts...) - } - oldDial := internal.GRPCNewClient - internal.GRPCNewClient = customDialer - defer func() { internal.GRPCNewClient = oldDial }() - - serverCfg, err := internalbootstrap.ServerConfigForTesting(internalbootstrap.ServerConfigTestingOptions{URI: "server-address"}) - if err != nil { - t.Fatalf("Failed to create server config for testing: %v", err) - } - // Create a new transport and ensure that the custom dialer was called. - opts := transport.Options{ - ServerCfg: serverCfg, - NodeProto: &v3corepb.Node{}, - OnRecvHandler: func(update transport.ResourceUpdate, onDone func()) error { - onDone() - return nil - }, - OnErrorHandler: func(error) {}, - OnSendHandler: func(*transport.ResourceSendInfo) {}, - } - c, err := transport.New(opts) - if err != nil { - t.Fatalf("transport.New(%v) failed: %v", opts, err) - } - defer c.Close() - - if !customDialerCalled { - t.Fatalf("transport.New(%+v) custom dialer called = false, want true", opts) - } - customDialerCalled = false - - // Reset the dialer, create a new transport and ensure that our custom - // dialer is no longer called. 
- internal.GRPCNewClient = grpc.NewClient - c, err = transport.New(opts) - defer func() { - if c != nil { - c.Close() - } - }() - if err != nil { - t.Fatalf("transport.New(%v) failed: %v", opts, err) - } - - if customDialerCalled { - t.Fatalf("transport.New(%+v) custom dialer called = true, want false", opts) - } -} From 70e8931a0e13a56823c4802015bfe52c55f330ee Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Fri, 1 Nov 2024 14:43:55 -0700 Subject: [PATCH 41/57] transport: remove useless trampoline function (#7801) --- clientconn.go | 2 +- internal/transport/http2_client.go | 4 +-- internal/transport/transport.go | 7 ----- internal/transport/transport_test.go | 38 ++++++++++++++-------------- 4 files changed, 22 insertions(+), 29 deletions(-) diff --git a/clientconn.go b/clientconn.go index 188109bb72ef..091ccd952a66 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1371,7 +1371,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 62b81885d8ef..6d7374ed1b94 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -199,10 +199,10 @@ func isTemporary(err error) bool { return true } -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { diff --git a/internal/transport/transport.go b/internal/transport/transport.go index e12cb0bc914b..4e01a54d0c1d 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -39,7 +39,6 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -725,12 +724,6 @@ type ConnectOptions struct { BufferPool mem.BufferPool } -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - // Options provides additional hints and information for message // transmission. 
type Options struct { diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 4752c785b59d..766a59b99657 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -465,7 +465,7 @@ func setUpWithOptions(t *testing.T, port int, sc *ServerConfig, ht hType, copts copts.ChannelzParent = channelzSubChannel(t) connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) - ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func(GoAwayReason) {}) + ct, connErr := NewHTTP2Client(connectCtx, context.Background(), addr, copts, func(GoAwayReason) {}) if connErr != nil { cancel() // Do not cancel in success path. t.Fatalf("failed to create transport: %v", connErr) @@ -496,7 +496,7 @@ func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, connCh chan net.C connCh <- conn }() connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) - tr, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) + tr, err := NewHTTP2Client(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err != nil { cancel() // Do not cancel in success path. // Server clean-up. @@ -1353,23 +1353,23 @@ func (s) TestClientHonorsConnectContext(t *testing.T) { parent := channelzSubChannel(t) copts := ConnectOptions{ChannelzParent: parent} - _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) + _, err = NewHTTP2Client(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err == nil { - t.Fatalf("NewClientTransport() returned successfully; wanted error") + t.Fatalf("NewHTTP2Client() returned successfully; wanted error") } - t.Logf("NewClientTransport() = _, %v", err) + t.Logf("NewHTTP2Client() = _, %v", err) if time.Since(timeBefore) > 3*time.Second { - t.Fatalf("NewClientTransport returned > 2.9s after context cancellation") + t.Fatalf("NewHTTP2Client returned > 2.9s after context cancellation") } // Test context deadline. 
connectCtx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() - _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) + _, err = NewHTTP2Client(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err == nil { - t.Fatalf("NewClientTransport() returned successfully; wanted error") + t.Fatalf("NewHTTP2Client() returned successfully; wanted error") } - t.Logf("NewClientTransport() = _, %v", err) + t.Logf("NewHTTP2Client() = _, %v", err) } func (s) TestClientWithMisbehavedServer(t *testing.T) { @@ -1445,7 +1445,7 @@ func (s) TestClientWithMisbehavedServer(t *testing.T) { parent := channelzSubChannel(t) copts := ConnectOptions{ChannelzParent: parent} - ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) + ct, err := NewHTTP2Client(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err != nil { t.Fatalf("Error while creating client transport: %v", err) } @@ -2436,7 +2436,7 @@ func (ac *attrTransportCreds) Clone() credentials.TransportCredentials { } // TestClientHandshakeInfo adds attributes to the resolver.Address passes to -// NewClientTransport and verifies that these attributes are received by the +// NewHTTP2Client and verifies that these attributes are received by the // transport credential handshaker. func (s) TestClientHandshakeInfo(t *testing.T) { server := setUpServerOnly(t, 0, &ServerConfig{}, pingpong) @@ -2458,9 +2458,9 @@ func (s) TestClientHandshakeInfo(t *testing.T) { TransportCredentials: creds, ChannelzParent: channelzSubChannel(t), } - tr, err := NewClientTransport(ctx, ctx, addr, copts, func(GoAwayReason) {}) + tr, err := NewHTTP2Client(ctx, ctx, addr, copts, func(GoAwayReason) {}) if err != nil { - t.Fatalf("NewClientTransport(): %v", err) + t.Fatalf("NewHTTP2Client(): %v", err) } defer tr.Close(fmt.Errorf("closed manually by test")) @@ -2471,7 +2471,7 @@ func (s) TestClientHandshakeInfo(t *testing.T) { } // TestClientHandshakeInfoDialer adds attributes to the resolver.Address passes to -// NewClientTransport and verifies that these attributes are received by a custom +// NewHTTP2Client and verifies that these attributes are received by a custom // dialer. 
func (s) TestClientHandshakeInfoDialer(t *testing.T) { server := setUpServerOnly(t, 0, &ServerConfig{}, pingpong) @@ -2499,9 +2499,9 @@ func (s) TestClientHandshakeInfoDialer(t *testing.T) { Dialer: dialer, ChannelzParent: channelzSubChannel(t), } - tr, err := NewClientTransport(ctx, ctx, addr, copts, func(GoAwayReason) {}) + tr, err := NewHTTP2Client(ctx, ctx, addr, copts, func(GoAwayReason) {}) if err != nil { - t.Fatalf("NewClientTransport(): %v", err) + t.Fatalf("NewHTTP2Client(): %v", err) } defer tr.Close(fmt.Errorf("closed manually by test")) @@ -2759,7 +2759,7 @@ func (s) TestClientSendsAGoAwayFrame(t *testing.T) { } }() - ct, err := NewClientTransport(ctx, ctx, resolver.Address{Addr: lis.Addr().String()}, ConnectOptions{}, func(GoAwayReason) {}) + ct, err := NewHTTP2Client(ctx, ctx, resolver.Address{Addr: lis.Addr().String()}, ConnectOptions{}, func(GoAwayReason) {}) if err != nil { t.Fatalf("Error while creating client transport: %v", err) } @@ -2827,7 +2827,7 @@ func (s) TestClientCloseReturnsAfterReaderCompletes(t *testing.T) { // Create a client transport with a custom dialer that hangs the Read() // after Close(). - ct, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}) + ct, err := NewHTTP2Client(ctx, context.Background(), addr, copts, func(GoAwayReason) {}) if err != nil { t.Fatalf("Failed to create transport: %v", err) } @@ -2914,7 +2914,7 @@ func (s) TestClientCloseReturnsEarlyWhenGoAwayWriteHangs(t *testing.T) { copts := ConnectOptions{Dialer: dialer} copts.ChannelzParent = channelzSubChannel(t) // Create client transport with custom dialer - ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func(GoAwayReason) {}) + ct, connErr := NewHTTP2Client(connectCtx, context.Background(), addr, copts, func(GoAwayReason) {}) if connErr != nil { t.Fatalf("failed to create transport: %v", connErr) } From 2a18bfcb16f3b7961ff28c902753c0b75e7d3ed8 Mon Sep 17 00:00:00 2001 From: Doug Fawley Date: Mon, 4 Nov 2024 13:42:38 -0800 Subject: [PATCH 42/57] transport: refactor to split ClientStream from ServerStream from common Stream functionality (#7802) --- internal/transport/client_stream.go | 115 +++++++++++ internal/transport/handler_server.go | 32 +-- internal/transport/handler_server_test.go | 24 +-- internal/transport/http2_client.go | 42 ++-- internal/transport/http2_server.go | 40 ++-- internal/transport/server_stream.go | 158 ++++++++++++++ internal/transport/transport.go | 239 ++-------------------- internal/transport/transport_test.go | 64 +++--- rpc_util.go | 8 +- server.go | 18 +- server_test.go | 4 +- stream.go | 6 +- 12 files changed, 410 insertions(+), 340 deletions(-) create mode 100644 internal/transport/client_stream.go create mode 100644 internal/transport/server_stream.go diff --git a/internal/transport/client_stream.go b/internal/transport/client_stream.go new file mode 100644 index 000000000000..112b34f6d7cc --- /dev/null +++ b/internal/transport/client_stream.go @@ -0,0 +1,115 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync/atomic" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ClientStream implements streaming functionality for a gRPC client. +type ClientStream struct { + *Stream // Embed for common stream functionality. + + ct ClientTransport + done chan struct{} // closed at the end of stream to unblock writers. + doneFunc func() // invoked at the end of stream. + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). + headerValid bool + header metadata.MD // the received header metadata + noHeaders bool // set if the client never received headers (set only after the stream is done). + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + status *status.Status // the status error received from the server +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *ClientStream) BytesReceived() bool { + return atomic.LoadUint32(&s.bytesReceived) == 1 +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *ClientStream) Unprocessed() bool { + return atomic.LoadUint32(&s.unprocessed) == 1 +} + +func (s *ClientStream) waitOnHeader() { + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ClientStream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *ClientStream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. Acquires the key-value +// pairs of header metadata once it is available. It blocks until i) the +// metadata is ready or ii) there is no header metadata or iii) the stream is +// canceled/expired. +func (s *ClientStream) Header() (metadata.MD, error) { + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. +func (s *ClientStream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. 
+func (s *ClientStream) Status() *status.Status { + return s.status +} diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index ce878693bd74..0ebe4a71cb9b 100644 --- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error { } } -func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { +func (ht *serverHandlerTransport) WriteStatus(s *ServerStream, st *status.Status) error { ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() @@ -289,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro // writePendingHeaders sets common and custom headers on the first // write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { +func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) { ht.writeCommonHeaders(s) ht.writeCustomHeaders(s) } // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -317,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (ht *serverHandlerTransport) Write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *Options) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. @@ -357,7 +357,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl return nil } -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +func (ht *serverHandlerTransport) WriteHeader(s *ServerStream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -385,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. 
var cancel context.CancelFunc if ht.timeoutSet { @@ -408,16 +408,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, + s := &ServerStream{ + Stream: &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + buf: newRecvBuffer(), + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + }, cancel: cancel, - buf: newRecvBuffer(), st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ diff --git a/internal/transport/handler_server_test.go b/internal/transport/handler_server_test.go index a60ab859ac33..32f6325bd0ee 100644 --- a/internal/transport/handler_server_test.go +++ b/internal/transport/handler_server_test.go @@ -274,7 +274,7 @@ func newHandleStreamTest(t *testing.T) *handleStreamTest { func (s) TestHandlerTransport_HandleStreams(t *testing.T) { st := newHandleStreamTest(t) - handleStream := func(s *Stream) { + handleStream := func(s *ServerStream) { if want := "/service/foo.bar"; s.method != want { t.Errorf("stream method = %q; want %q", s.method, want) } @@ -313,7 +313,7 @@ func (s) TestHandlerTransport_HandleStreams(t *testing.T) { st.ht.WriteStatus(s, status.New(codes.OK, "")) } st.ht.HandleStreams( - context.Background(), func(s *Stream) { go handleStream(s) }, + context.Background(), func(s *ServerStream) { go handleStream(s) }, ) wantHeader := http.Header{ "Date": nil, @@ -342,11 +342,11 @@ func (s) TestHandlerTransport_HandleStreams_InvalidArgument(t *testing.T) { func handleStreamCloseBodyTest(t *testing.T, statusCode codes.Code, msg string) { st := newHandleStreamTest(t) - handleStream := func(s *Stream) { + handleStream := func(s *ServerStream) { st.ht.WriteStatus(s, status.New(statusCode, msg)) } st.ht.HandleStreams( - context.Background(), func(s *Stream) { go handleStream(s) }, + context.Background(), func(s *ServerStream) { go handleStream(s) }, ) wantHeader := http.Header{ "Date": nil, @@ -379,7 +379,7 @@ func (s) TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { if err != nil { t.Fatal(err) } - runStream := func(s *Stream) { + runStream := func(s *ServerStream) { defer bodyw.Close() select { case <-s.ctx.Done(): @@ -395,7 +395,7 @@ func (s) TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { ht.WriteStatus(s, status.New(codes.DeadlineExceeded, "too slow")) } ht.HandleStreams( - context.Background(), func(s *Stream) { go runStream(s) }, + context.Background(), func(s *ServerStream) { go runStream(s) }, ) wantHeader := http.Header{ "Date": nil, @@ -412,7 +412,7 @@ func (s) TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { // TestHandlerTransport_HandleStreams_MultiWriteStatus ensures that // concurrent "WriteStatus"s do not panic writing to closed "writes" channel. 
func (s) TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) { - testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) { + testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *ServerStream) { if want := "/service/foo.bar"; s.method != want { t.Errorf("stream method = %q; want %q", s.method, want) } @@ -433,7 +433,7 @@ func (s) TestHandlerTransport_HandleStreams_MultiWriteStatus(t *testing.T) { // TestHandlerTransport_HandleStreams_WriteStatusWrite ensures that "Write" // following "WriteStatus" does not panic writing to closed "writes" channel. func (s) TestHandlerTransport_HandleStreams_WriteStatusWrite(t *testing.T) { - testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *Stream) { + testHandlerTransportHandleStreams(t, func(st *handleStreamTest, s *ServerStream) { if want := "/service/foo.bar"; s.method != want { t.Errorf("stream method = %q; want %q", s.method, want) } @@ -444,10 +444,10 @@ func (s) TestHandlerTransport_HandleStreams_WriteStatusWrite(t *testing.T) { }) } -func testHandlerTransportHandleStreams(t *testing.T, handleStream func(st *handleStreamTest, s *Stream)) { +func testHandlerTransportHandleStreams(t *testing.T, handleStream func(st *handleStreamTest, s *ServerStream)) { st := newHandleStreamTest(t) st.ht.HandleStreams( - context.Background(), func(s *Stream) { go handleStream(st, s) }, + context.Background(), func(s *ServerStream) { go handleStream(st, s) }, ) } @@ -476,11 +476,11 @@ func (s) TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) { } hst := newHandleStreamTest(t) - handleStream := func(s *Stream) { + handleStream := func(s *ServerStream) { hst.ht.WriteStatus(s, st) } hst.ht.HandleStreams( - context.Background(), func(s *Stream) { go handleStream(s) }, + context.Background(), func(s *ServerStream) { go handleStream(s) }, ) wantHeader := http.Header{ "Date": nil, diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index 6d7374ed1b94..f0c5cbc47645 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -123,7 +123,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ClientStream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -339,7 +339,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -480,17 +480,19 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
- s := &Stream{ - ct: t, - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - doneFunc: callHdr.DoneFunc, + s := &ClientStream{ + Stream: &Stream{ + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + contentSubtype: callHdr.ContentSubtype, + }, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -738,7 +740,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -910,7 +912,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, // CloseStream clears the footprint of a stream when the stream is not needed any more. // This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { +func (t *http2Client) CloseStream(s *ClientStream, err error) { var ( rst bool rstCode http2.ErrCode @@ -922,7 +924,7 @@ func (t *http2Client) CloseStream(s *Stream, err error) { t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) } -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -1085,7 +1087,7 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (t *http2Client) Write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *Options) error { reader := data.Reader() if opts.Last { @@ -1117,7 +1119,7 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O return nil } -func (t *http2Client) getStream(f http2.Frame) *Stream { +func (t *http2Client) getStream(f http2.Frame) *ClientStream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1127,7 +1129,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1136,7 +1138,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. 
// Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { +func (t *http2Client) updateWindow(s *ClientStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1383,7 +1385,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*Stream, 0) + streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 279cd5ccb1b4..f6faa29b9520 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -111,7 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ServerStream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -256,7 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ServerStream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -359,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -385,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &Stream{ - id: streamID, + s := &ServerStream{ + Stream: &Stream{ + id: streamID, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + }, st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -634,7 +636,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
-func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -698,7 +700,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -716,7 +718,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -726,7 +728,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { +func (t *http2Server) updateWindow(s *ServerStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -963,7 +965,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *Stream) error { +func (t *http2Server) streamContextErr(s *ServerStream) error { select { case <-t.done: return ErrConnClosing @@ -973,7 +975,7 @@ func (t *http2Server) streamContextErr(s *Stream) error { } // WriteHeader sends the header metadata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +func (t *http2Server) WriteHeader(s *ServerStream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1006,7 +1008,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1046,7 +1048,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +func (t *http2Server) WriteStatus(s *ServerStream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1117,7 +1119,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). 
-func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (t *http2Server) Write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *Options) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. @@ -1276,7 +1278,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1297,7 +1299,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { +func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1321,7 +1323,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h } // closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { +func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. diff --git a/internal/transport/server_stream.go b/internal/transport/server_stream.go new file mode 100644 index 000000000000..acbf014900bc --- /dev/null +++ b/internal/transport/server_stream.go @@ -0,0 +1,158 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "errors" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/grpc/metadata" +) + +// ServerStream implements streaming functionality for a gRPC server. +type ServerStream struct { + *Stream // Embed for common stream functionality. + + st ServerTransport + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. + clientAdvertisedCompressors string + headerWireLength int + + // hdrMu protects outgoing header and trailer metadata. + hdrMu sync.Mutex + header metadata.MD // the outgoing header metadata. Updated by WriteHeader. + headerSent uint32 // atomically set to 1 when the headers are sent out. 
+} + +// isHeaderSent indicates whether headers have been sent. +func (s *ServerStream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. +func (s *ServerStream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ServerStream) RecvCompress() string { + return s.recvCompress +} + +// SendCompress returns the send compressor name. +func (s *ServerStream) SendCompress() string { + return s.sendCompress +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *ServerStream) ContentSubtype() string { + return s.contentSubtype +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *ServerStream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *ServerStream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *ServerStream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Header returns the header metadata of the stream. It returns the out header +// after t.WriteHeader is called. It does not block and must not be called +// until after WriteHeader. +func (s *ServerStream) Header() (metadata.MD, error) { + // Return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. +func (s *ServerStream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// This should not be called in parallel to other data writes. +func (s *ServerStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SendHeader sends the given header metadata. The given metadata is +// combined with any metadata set by previous calls to SetHeader and +// then written to the transport stream. +func (s *ServerStream) SendHeader(md metadata.MD) error { + return s.st.WriteHeader(s, md) +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. +// This should not be called parallel to other data writes. 
+func (s *ServerStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 4e01a54d0c1d..f3148e31c5dd 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -27,7 +27,6 @@ import ( "fmt" "io" "net" - "strings" "sync" "sync/atomic" "time" @@ -287,14 +286,8 @@ const ( // Stream represents an RPC in the transport layer. type Stream struct { id uint32 - st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream + ctx context.Context // the associated context of the stream + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer @@ -302,58 +295,17 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - state streamState - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string -} -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} - -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. 
-func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 + trailer metadata.MD // the key-value map of trailer metadata. } func (s *Stream) swapState(st streamState) streamState { @@ -368,110 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. - return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - // Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. +// after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. 
func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype + return s.trailer.Copy() } // Context returns the context of the stream. @@ -479,69 +333,11 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - func (s *Stream) write(m recvMsg) { s.buf.put(m) } @@ -638,17 +434,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -777,16 +562,16 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. 
- Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error + Write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) // CloseStream clears the footprint of a stream when the stream is // not needed any more. The err indicates the error incurred when // CloseStream is called. Must be called when a stream is finished // unless the associated transport is closing. - CloseStream(stream *Stream, err error) + CloseStream(stream *ClientStream, err error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -821,19 +606,19 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*Stream)) + HandleStreams(context.Context, func(*ServerStream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error + WriteHeader(s *ServerStream, md metadata.MD) error // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error + Write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error + WriteStatus(s *ServerStream, st *status.Status) error // Close tears down the transport. Once it is called, the transport // should not be accessed any more. 
All the pending streams and their diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 766a59b99657..832435f62438 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -117,7 +117,7 @@ const ( pingpong ) -func (h *testStreamHandler) handleStreamAndNotify(*Stream) { +func (h *testStreamHandler) handleStreamAndNotify(*ServerStream) { if h.notify == nil { return } @@ -130,7 +130,7 @@ func (h *testStreamHandler) handleStreamAndNotify(*Stream) { }() } -func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) { +func (h *testStreamHandler) handleStream(t *testing.T, s *ServerStream) { req := expectedRequest resp := expectedResponse if s.Method() == "foo.Large" { @@ -153,7 +153,7 @@ func (h *testStreamHandler) handleStream(t *testing.T, s *Stream) { h.t.WriteStatus(s, status.New(codes.OK, "")) } -func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *Stream) { +func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *ServerStream) { header := make([]byte, 5) for { if _, err := s.readTo(header); err != nil { @@ -180,7 +180,7 @@ func (h *testStreamHandler) handleStreamPingPong(t *testing.T, s *Stream) { } } -func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { +func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *ServerStream) { conn, ok := s.st.(*http2Server) if !ok { t.Errorf("Failed to convert %v to *http2Server", s.st) @@ -213,14 +213,14 @@ func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { } } -func (h *testStreamHandler) handleStreamEncodingRequiredStatus(s *Stream) { +func (h *testStreamHandler) handleStreamEncodingRequiredStatus(s *ServerStream) { // raw newline is not accepted by http2 framer so it must be encoded. h.t.WriteStatus(s, encodingTestStatus) // Drain any remaining buffers from the stream since it was closed early. s.Read(math.MaxInt) } -func (h *testStreamHandler) handleStreamInvalidHeaderField(s *Stream) { +func (h *testStreamHandler) handleStreamInvalidHeaderField(s *ServerStream) { headerFields := []hpack.HeaderField{} headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField}) h.t.controlBuf.put(&headerFrame{ @@ -234,7 +234,7 @@ func (h *testStreamHandler) handleStreamInvalidHeaderField(s *Stream) { // stream-level flow control. // This handler assumes dynamic flow control is turned off and assumes window // sizes to be set to defaultWindowSize. 
-func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) { +func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *ServerStream) { req := expectedRequest resp := expectedResponse if s.Method() == "foo.Large" { @@ -385,17 +385,17 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT case notifyCall: go transport.HandleStreams(context.Background(), h.handleStreamAndNotify) case suspended: - go transport.HandleStreams(context.Background(), func(*Stream) {}) + go transport.HandleStreams(context.Background(), func(*ServerStream) {}) case misbehaved: - go transport.HandleStreams(context.Background(), func(s *Stream) { + go transport.HandleStreams(context.Background(), func(s *ServerStream) { go h.handleStreamMisbehave(t, s) }) case encodingRequiredStatus: - go transport.HandleStreams(context.Background(), func(s *Stream) { + go transport.HandleStreams(context.Background(), func(s *ServerStream) { go h.handleStreamEncodingRequiredStatus(s) }) case invalidHeaderField: - go transport.HandleStreams(context.Background(), func(s *Stream) { + go transport.HandleStreams(context.Background(), func(s *ServerStream) { go h.handleStreamInvalidHeaderField(s) }) case delayRead: @@ -404,15 +404,15 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT s.mu.Lock() close(s.ready) s.mu.Unlock() - go transport.HandleStreams(context.Background(), func(s *Stream) { + go transport.HandleStreams(context.Background(), func(s *ServerStream) { go h.handleStreamDelayRead(t, s) }) case pingpong: - go transport.HandleStreams(context.Background(), func(s *Stream) { + go transport.HandleStreams(context.Background(), func(s *ServerStream) { go h.handleStreamPingPong(t, s) }) default: - go transport.HandleStreams(context.Background(), func(s *Stream) { + go transport.HandleStreams(context.Background(), func(s *ServerStream) { go h.handleStream(t, s) }) } @@ -941,7 +941,7 @@ func (s) TestMaxStreams(t *testing.T) { } // Keep creating streams until one fails with deadline exceeded, marking the application // of server settings on client. - slist := []*Stream{} + slist := []*ClientStream{} pctx, cancel := context.WithCancel(context.Background()) defer cancel() timer := time.NewTimer(time.Second * 10) @@ -1035,7 +1035,7 @@ func (s) TestServerContextCanceledOnClosedConnection(t *testing.T) { onEachWrite: func() {}, }) // Loop until the server side stream is created. - var ss *Stream + var ss *ServerStream for { time.Sleep(time.Second) sc.mu.Lock() @@ -1095,7 +1095,7 @@ func (s) TestClientConnDecoupledFromApplicationRead(t *testing.T) { } <-notifyChan - var sstream1 *Stream + var sstream1 *ServerStream // Access stream on the server. st.mu.Lock() for _, v := range st.activeStreams { @@ -1121,7 +1121,7 @@ func (s) TestClientConnDecoupledFromApplicationRead(t *testing.T) { t.Fatalf("Client failed to create second stream. 
Err: %v", err) } <-notifyChan - var sstream2 *Stream + var sstream2 *ServerStream st.mu.Lock() for _, v := range st.activeStreams { if v.id == cstream2.id { @@ -1200,7 +1200,7 @@ func (s) TestServerConnDecoupledFromApplicationRead(t *testing.T) { } return false, nil }) - var sstream1 *Stream + var sstream1 *ServerStream st.mu.Lock() for _, v := range st.activeStreams { if v.id == 1 { @@ -1654,7 +1654,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() const numStreams = 5 - clientStreams := make([]*Stream, numStreams) + clientStreams := make([]*ClientStream, numStreams) for i := 0; i < numStreams; i++ { var err error clientStreams[i], err = client.NewStream(ctx, &CallHdr{}) @@ -1666,7 +1666,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) // For each stream send pingpong messages to the server. for _, stream := range clientStreams { wg.Add(1) - go func(stream *Stream) { + go func(stream *ClientStream) { defer wg.Done() buf := make([]byte, msgSize+5) buf[0] = byte(0) @@ -1697,7 +1697,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) }(stream) } wg.Wait() - serverStreams := map[uint32]*Stream{} + serverStreams := map[uint32]*ServerStream{} loopyClientStreams := map[uint32]*outStream{} loopyServerStreams := map[uint32]*outStream{} // Get all the streams from server reader and writer and client writer. @@ -2211,7 +2211,7 @@ func (s) TestWriteHeaderConnectionError(t *testing.T) { } <-notifyChan // Wait for server stream to be established. - var sstream *Stream + var sstream *ServerStream // Access stream on the server. serverTransport.mu.Lock() for _, v := range serverTransport.activeStreams { @@ -2512,21 +2512,23 @@ func (s) TestClientHandshakeInfoDialer(t *testing.T) { } func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { - testStream := func() *Stream { - return &Stream{ + testStream := func() *ClientStream { + return &ClientStream{ + Stream: &Stream{ + buf: &recvBuffer{ + c: make(chan recvMsg), + mu: sync.Mutex{}, + }, + }, done: make(chan struct{}), headerChan: make(chan struct{}), - buf: &recvBuffer{ - c: make(chan recvMsg), - mu: sync.Mutex{}, - }, } } - testClient := func(ts *Stream) *http2Client { + testClient := func(ts *ClientStream) *http2Client { return &http2Client{ mu: sync.Mutex{}, - activeStreams: map[uint32]*Stream{ + activeStreams: map[uint32]*ClientStream{ 0: ts, }, controlBuf: newControlBuffer(make(<-chan struct{})), diff --git a/rpc_util.go b/rpc_util.go index aba1ae3e6784..033ffdc1c9bf 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -817,7 +817,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. 
// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -908,10 +908,14 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return out, out.Len(), nil } +type recvCompressor interface { + RecvCompress() string +} + // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/server.go b/server.go index 62a5b04b76e2..23a1660057e8 100644 --- a/server.go +++ b/server.go @@ -621,8 +621,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.ServerStream channel forever and waits +// for data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). 
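// The recvCompressor interface added to rpc_util.go above narrows what recv
// and recvAndDecompress need from a stream to a single RecvCompress method,
// so both the client- and server-side stream types (and trivial test fakes)
// satisfy it. A minimal sketch within package grpc; fakeStream is
// hypothetical and only the compile-time assertions matter:

type fakeStream struct{}

func (fakeStream) RecvCompress() string { return "gzip" }

var (
    _ recvCompressor = fakeStream{}                   // test double
    _ recvCompressor = (*transport.ClientStream)(nil) // client side (csAttempt.s)
    _ recvCompressor = (*transport.ServerStream)(nil) // server side (serverStream.s)
)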
// @@ -1020,7 +1020,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { + st.HandleStreams(ctx, func(stream *transport.ServerStream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1136,7 +1136,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1212,7 +1212,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1541,7 +1541,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1738,7 +1738,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -2103,7 +2103,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2125,7 +2125,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/server_test.go b/server_test.go index c3f9c02f2238..07200fbd4da0 100644 --- a/server_test.go +++ b/server_test.go @@ -164,13 +164,13 @@ func (s) TestRetryChainedInterceptor(t *testing.T) { } func (s) TestStreamContext(t *testing.T) { - expectedStream := &transport.Stream{} + expectedStream := &transport.ServerStream{} ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() ctx = NewContextWithServerTransportStream(ctx, expectedStream) s := ServerTransportStreamFromContext(ctx) - stream, ok := s.(*transport.Stream) + stream, ok := s.(*transport.ServerStream) if !ok || expectedStream != stream { t.Fatalf("GetStreamFromContext(%v) = %v, %t, want: %v, true", ctx, stream, ok, expectedStream) } diff --git a/stream.go b/stream.go index b2d82c364d7d..34c846a436b5 100644 --- a/stream.go +++ b/stream.go @@ -584,7 +584,7 @@ type csAttempt struct { ctx context.Context cs *clientStream t transport.ClientTransport - s *transport.Stream + s *transport.ClientStream p *parser pickResult balancer.PickResult @@ -1340,7 +1340,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.Stream + s *transport.ClientStream ac *addrConn callHdr *transport.CallHdr cancel context.CancelFunc @@ -1578,7 +1578,7 @@ type ServerStream interface { type serverStream struct { ctx context.Context t transport.ServerTransport - s *transport.Stream + s *transport.ServerStream p *parser codec baseCodec From 2de6df9c6fd2cb91dfbb88a307c144a46ac47729 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Mon, 4 Nov 2024 16:26:59 -0800 Subject: [PATCH 43/57] xds/resolver: fix flaky test TestResolverRemovedWithRPCs with a workaround (#7804) --- xds/internal/resolver/xds_resolver_test.go | 37 +++++++++++++++++++--- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index e5d569402ec8..77e8c47e6cd5 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -548,10 +548,39 @@ func (s) TestResolverRemovedWithRPCs(t *testing.T) { } } - // Re-add the listener and expect everything to work again. - configureResourcesOnManagementServer(ctx, t, mgmtServer, nodeID, listeners, routes) - // Read the update pushed by the resolver to the ClientConn. - cs = verifyUpdateFromResolver(ctx, t, stateCh, wantDefaultServiceConfig) + // Workaround for https://github.com/envoyproxy/go-control-plane/issues/431. + // + // The xDS client can miss route configurations due to a race condition + // between resource removal and re-addition. To avoid this, continuously + // push new versions of the resources to the server, ensuring the client + // eventually receives the configuration. + // + // TODO(https://github.com/grpc/grpc-go/issues/7807): Remove this workaround + // once the issue is fixed. 
+waitForStateUpdate: + for { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + + configureResourcesOnManagementServer(ctx, t, mgmtServer, nodeID, listeners, routes) + + select { + case state = <-stateCh: + if err := state.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", state.ServiceConfig.Err) + } + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(wantDefaultServiceConfig) + if !internal.EqualServiceConfigForTesting(state.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, state.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) + } + break waitForStateUpdate + case <-sCtx.Done(): + } + } + cs = iresolver.GetConfigSelector(state) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") + } res, err = cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { From 43ee17261c66304b4e7777e701224813d8ca28be Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Tue, 5 Nov 2024 23:22:26 +0530 Subject: [PATCH 44/57] balancer: Enforce embedding the SubConn interface in implementations (#7758) --- balancer/balancer.go | 3 +++ balancer/base/balancer_test.go | 1 + balancer_wrapper.go | 1 + internal/testutils/balancer.go | 1 + 4 files changed, 6 insertions(+) diff --git a/balancer/balancer.go b/balancer/balancer.go index 3a2092f1056e..324915c1692f 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -155,6 +155,9 @@ type SubConn interface { // indicate the shutdown operation. This may be delivered before // in-progress RPCs are complete and the actual connection is closed. Shutdown() + // enforceEmbedding is an unexported method to force implementers embed + // this interface, allowing gRPC to add methods without breaking users. + enforceEmbedding() } // NewSubConnOptions contains options to create new SubConn. diff --git a/balancer/base/balancer_test.go b/balancer/base/balancer_test.go index ea868f29245d..ea4a4fda2493 100644 --- a/balancer/base/balancer_test.go +++ b/balancer/base/balancer_test.go @@ -41,6 +41,7 @@ func (c *testClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewS func (c *testClientConn) UpdateState(balancer.State) {} type testSubConn struct { + balancer.SubConn updateState func(balancer.SubConnState) } diff --git a/balancer_wrapper.go b/balancer_wrapper.go index 2a4f2878aef4..80620d31093d 100644 --- a/balancer_wrapper.go +++ b/balancer_wrapper.go @@ -254,6 +254,7 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + balancer.SubConn ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go index 80021903df3c..423e8d25f2f2 100644 --- a/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -32,6 +32,7 @@ import ( // TestSubConn implements the SubConn interface, to be used in tests. 
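// With the unexported enforceEmbedding method added to balancer.SubConn in
// this patch, external SubConn implementations can only satisfy the interface
// by embedding it, which also keeps them compiling as gRPC adds methods. A
// minimal sketch of the resulting shape (wrappedSubConn is hypothetical); the
// TestSubConn type right below follows the same pattern:

type wrappedSubConn struct {
    balancer.SubConn // embedding satisfies enforceEmbedding and any future methods

    onConnect func()
}

func (w *wrappedSubConn) Connect() {
    if w.onConnect != nil {
        w.onConnect()
    }
    w.SubConn.Connect() // delegate to the wrapped SubConn
}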
type TestSubConn struct { + balancer.SubConn tcc *BalancerClientConn // the CC that owns this SubConn id string ConnectCh chan struct{} From 0ec8fd84fdfb54f1b7f9c2d3d22aa20cd7a8cf09 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Tue, 5 Nov 2024 10:14:21 -0800 Subject: [PATCH 45/57] xdsclient/ads: reset the pending bit of ADS stream flow control at the end of the onDone method (#7806) --- xds/internal/xdsclient/transport/ads/ads_stream.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/xds/internal/xdsclient/transport/ads/ads_stream.go b/xds/internal/xdsclient/transport/ads/ads_stream.go index e5f6cefe6159..27ef4d101abd 100644 --- a/xds/internal/xdsclient/transport/ads/ads_stream.go +++ b/xds/internal/xdsclient/transport/ads/ads_stream.go @@ -812,8 +812,6 @@ func (fc *adsFlowControl) wait(ctx context.Context) bool { // onDone indicates that all watchers have consumed the most recent update. func (fc *adsFlowControl) onDone() { - fc.pending.Store(false) - select { // Writes to the readyCh channel should not block ideally. The default // branch here is to appease the paranoid mind. @@ -823,4 +821,5 @@ func (fc *adsFlowControl) onDone() { fc.logger.Infof("ADS stream flow control readyCh is full") } } + fc.pending.Store(false) } From e9ac44cb8c46ea680d2b9be8cb32ac4402a3e6b4 Mon Sep 17 00:00:00 2001 From: hanut19 <50198451+hanut19@users.noreply.github.com> Date: Wed, 6 Nov 2024 04:17:26 +0530 Subject: [PATCH 46/57] cleanup: replace grpc.Dial with grpc.NewClient in grpclb test (#7789) --- balancer/grpclb/grpclb_test.go | 41 +++++++++++++++++----------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index 10f4df0afa5f..62dc947e0ee5 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -827,28 +827,28 @@ func (s) TestGRPCLB_Fallback(t *testing.T) { defer stopBackends(standaloneBEs) r := manual.NewBuilderWithScheme("whatever") + // Set the initial resolver state with fallback backend address stored in + // the `Addresses` field and an invalid remote balancer address stored in + // attributes, which will cause fallback behavior to be invoked. + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig), + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: "invalid.address", ServerName: lbServerName}}}) + r.InitialState(rs) + dopts := []grpc.DialOption{ grpc.WithResolvers(r), grpc.WithTransportCredentials(&serverNameCheckCreds{}), grpc.WithContextDialer(fakeNameDialer), } - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) + cc, err := grpc.NewClient(r.Scheme()+":///"+beServerName, dopts...) if err != nil { - t.Fatalf("Failed to dial to the backend %v", err) + t.Fatalf("Failed to create new client to the backend %v", err) } defer cc.Close() testC := testgrpc.NewTestServiceClient(cc) - // Push an update to the resolver with fallback backend address stored in - // the `Addresses` field and an invalid remote balancer address stored in - // attributes, which will cause fallback behavior to be invoked. 
- rs := resolver.State{ - Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, - ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), - } - rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: "invalid.address", ServerName: lbServerName}}}) - r.UpdateState(rs) - // Make an RPC and verify that it got routed to the fallback backend. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -1187,23 +1187,22 @@ func (s) TestGRPCLB_BackendConnectionErrorPropagation(t *testing.T) { standaloneBEs := startBackends(t, "arbitrary.invalid.name", true, beLis) defer stopBackends(standaloneBEs) - cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig), + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.InitialState(rs) + cc, err := grpc.NewClient(r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), grpc.WithTransportCredentials(&serverNameCheckCreds{}), grpc.WithContextDialer(fakeNameDialer)) if err != nil { - t.Fatalf("Failed to dial to the backend %v", err) + t.Fatalf("Failed to create new client to the backend %v", err) } defer cc.Close() testC := testgrpc.NewTestServiceClient(cc) - rs := resolver.State{ - Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, - ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), - } - rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) - r.UpdateState(rs) - // If https://github.com/grpc/grpc-go/blob/65cabd74d8e18d7347fecd414fa8d83a00035f5f/balancer/grpclb/grpclb_test.go#L103 // changes, then expectedErrMsg may need to be updated. const expectedErrMsg = "received unexpected server name" From 18d218d14d5076fae0995363a85fb2accb7b4979 Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Wed, 6 Nov 2024 11:38:54 +0530 Subject: [PATCH 47/57] pickfirst: Interleave IPv6 and IPv4 addresses for happy eyeballs (#7742) --- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 77 ++++++- .../pickfirstleaf/pickfirstleaf_ext_test.go | 204 ++++++++++++++++-- internal/testutils/balancer.go | 2 + 3 files changed, 257 insertions(+), 26 deletions(-) diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 985b6edc7f4c..4b54866058d5 100644 --- a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -29,6 +29,7 @@ import ( "encoding/json" "errors" "fmt" + "net" "sync" "google.golang.org/grpc/balancer" @@ -61,6 +62,16 @@ var ( // TODO: change to pick-first when this becomes the default pick_first policy. const logPrefix = "[pick-first-leaf-lb %p] " +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 +) + type pickfirstBuilder struct{} func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { @@ -206,9 +217,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // "Flatten the list by concatenating the ordered list of addresses for // each of the endpoints, in order." 
- A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8305 section 4." - A61 - // TODO: support the above language. newAddrs = append(newAddrs, endpoint.Addresses...) } } else { @@ -232,6 +240,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // SubConn multiple times in the same pass. We don't want this. newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) + // Since we have a new set of addresses, we are again at first pass. b.firstPass = true @@ -314,6 +324,67 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { return retAddrs } +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. +// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip := net.ParseIP(host) + switch { + case ip.To4() != nil: + return ipAddrFamilyV4 + case ip.To16() != nil: + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + // reconcileSubConnsLocked updates the active subchannels based on a new address // list from the resolver. 
It does this by: // - closing subchannels: any existing subchannels associated with addresses diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go index 2ab40ef1615a..46e47be43ffa 100644 --- a/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go @@ -13,7 +13,6 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ package pickfirstleaf_test @@ -114,7 +113,7 @@ func setupPickFirstLeaf(t *testing.T, backendCount int, opts ...grpc.DialOption) // of the servers is running. // 2. RPCs are sent to verify they reach the running server. // -// The state transitions of the ClientConn and all the subconns created are +// The state transitions of the ClientConn and all the SubConns created are // verified. func (s) TestPickFirstLeaf_SimpleResolverUpdate_FirstServerReady(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -144,7 +143,7 @@ func (s) TestPickFirstLeaf_SimpleResolverUpdate_FirstServerReady(t *testing.T) { {Addrs: []resolver.Address{addrs[0]}, State: connectivity.Ready}, } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -186,7 +185,7 @@ func (s) TestPickFirstLeaf_SimpleResolverUpdate_FirstServerUnReady(t *testing.T) {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -231,7 +230,7 @@ func (s) TestPickFirstLeaf_SimpleResolverUpdate_DuplicateAddrs(t *testing.T) { {Addrs: []resolver.Address{addrs[1]}, State: connectivity.Ready}, } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -252,7 +251,7 @@ func (s) TestPickFirstLeaf_SimpleResolverUpdate_DuplicateAddrs(t *testing.T) { // running. This may not be the same server as before. // 4. RPCs are sent to verify they reach the running server. // -// The state transitions of the ClientConn and all the subconns created are +// The state transitions of the ClientConn and all the SubConns created are // verified. 
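// A worked example of the interleaveAddresses helper added earlier in this
// patch: the first address family seen (IPv4 here) leads, and the remaining
// families are rotated through one address at a time. A sketch that would
// live inside the pickfirstleaf package itself, since the helper is
// unexported; the addresses are made up:

func sketchInterleave() []resolver.Address {
    in := []resolver.Address{
        {Addr: "10.0.0.1:50051"},      // IPv4
        {Addr: "10.0.0.2:50051"},      // IPv4
        {Addr: "[2001:db8::1]:50051"}, // IPv6
        {Addr: "example.com:50051"},   // not an IP, grouped as "unknown"
    }
    // Returns the addresses in the order:
    //   10.0.0.1:50051, [2001:db8::1]:50051, example.com:50051, 10.0.0.2:50051
    return interleaveAddresses(in)
}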
func (s) TestPickFirstLeaf_ResolverUpdates_DisjointLists(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -285,7 +284,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_DisjointLists(t *testing.T) { } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } bm.backends[2].S.Stop() @@ -303,7 +302,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_DisjointLists(t *testing.T) { } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -348,7 +347,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_ActiveBackendInUpdatedList(t *testing } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } bm.backends[2].S.Stop() @@ -369,7 +368,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_ActiveBackendInUpdatedList(t *testing } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -412,7 +411,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_InActiveBackendInUpdatedList(t *testi } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } bm.backends[2].S.Stop() @@ -432,7 +431,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_InActiveBackendInUpdatedList(t *testi } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -477,7 +476,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_IdenticalLists(t *testing.T) { } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0], addrs[1]}}) @@ -496,7 +495,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_IdenticalLists(t *testing.T) { } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -551,7 +550,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_FirstServerRestart(t *testing.T) } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } // Shut down the connected server. 
@@ -569,7 +568,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_FirstServerRestart(t *testing.T) } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -617,7 +616,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_SecondServerRestart(t *testing.T) } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } // Shut down the connected server. @@ -641,7 +640,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_SecondServerRestart(t *testing.T) } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -689,7 +688,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_SecondServerToFirst(t *testing.T) } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } // Shut down the connected server. @@ -713,7 +712,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_SecondServerToFirst(t *testing.T) } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -760,7 +759,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_FirstServerToSecond(t *testing.T) } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } // Shut down the connected server. @@ -783,7 +782,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_FirstServerToSecond(t *testing.T) } if diff := cmp.Diff(wantSCStates, bal.subConnStates()); diff != "" { - t.Errorf("subconn states mismatch (-want +got):\n%s", diff) + t.Errorf("SubConn states mismatch (-want +got):\n%s", diff) } wantConnStateTransitions := []connectivity.State{ @@ -851,7 +850,166 @@ func (s) TestPickFirstLeaf_EmptyAddressList(t *testing.T) { } } -// stateStoringBalancer stores the state of the subconns being created. +func (s) TestPickFirstLeaf_InterleavingIPV4Preffered(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewBalancerClientConn(t) + bal := balancer.Get(pickfirstleaf.Name).Build(cc, balancer.BuildOptions{}) + defer bal.Close() + ccState := balancer.ClientConnState{ + ResolverState: resolver.State{ + Endpoints: []resolver.Endpoint{ + {Addresses: []resolver.Address{{Addr: "1.1.1.1:1111"}}}, + {Addresses: []resolver.Address{{Addr: "2.2.2.2:2"}}}, + {Addresses: []resolver.Address{{Addr: "3.3.3.3:3"}}}, + // IPv4-mapped IPv6 address, considered as an IPv4 for + // interleaving. 
+ {Addresses: []resolver.Address{{Addr: "[::FFFF:192.168.0.1]:2222"}}}, + {Addresses: []resolver.Address{{Addr: "[0001:0001:0001:0001:0001:0001:0001:0001]:8080"}}}, + {Addresses: []resolver.Address{{Addr: "[0002:0002:0002:0002:0002:0002:0002:0002]:8080"}}}, + {Addresses: []resolver.Address{{Addr: "[0003:0003:0003:0003:0003:0003:0003:0003]:3333"}}}, + {Addresses: []resolver.Address{{Addr: "grpc.io:80"}}}, // not an IP. + }, + }, + } + if err := bal.UpdateClientConnState(ccState); err != nil { + t.Fatalf("UpdateClientConnState(%v) returned error: %v", ccState, err) + } + + wantAddrs := []resolver.Address{ + {Addr: "1.1.1.1:1111"}, + {Addr: "[0001:0001:0001:0001:0001:0001:0001:0001]:8080"}, + {Addr: "grpc.io:80"}, + {Addr: "2.2.2.2:2"}, + {Addr: "[0002:0002:0002:0002:0002:0002:0002:0002]:8080"}, + {Addr: "3.3.3.3:3"}, + {Addr: "[0003:0003:0003:0003:0003:0003:0003:0003]:3333"}, + {Addr: "[::FFFF:192.168.0.1]:2222"}, + } + + gotAddrs, err := subConnAddresses(ctx, cc, 8) + if err != nil { + t.Fatalf("%v", err) + } + if diff := cmp.Diff(wantAddrs, gotAddrs); diff != "" { + t.Errorf("SubConn creation order mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_InterleavingIPv6Preffered(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewBalancerClientConn(t) + bal := balancer.Get(pickfirstleaf.Name).Build(cc, balancer.BuildOptions{}) + defer bal.Close() + ccState := balancer.ClientConnState{ + ResolverState: resolver.State{ + Endpoints: []resolver.Endpoint{ + {Addresses: []resolver.Address{{Addr: "[0001:0001:0001:0001:0001:0001:0001:0001]:8080"}}}, + {Addresses: []resolver.Address{{Addr: "1.1.1.1:1111"}}}, + {Addresses: []resolver.Address{{Addr: "2.2.2.2:2"}}}, + {Addresses: []resolver.Address{{Addr: "3.3.3.3:3"}}}, + {Addresses: []resolver.Address{{Addr: "[::FFFF:192.168.0.1]:2222"}}}, + {Addresses: []resolver.Address{{Addr: "[0002:0002:0002:0002:0002:0002:0002:0002]:2222"}}}, + {Addresses: []resolver.Address{{Addr: "[0003:0003:0003:0003:0003:0003:0003:0003]:3333"}}}, + {Addresses: []resolver.Address{{Addr: "grpc.io:80"}}}, // not an IP. + }, + }, + } + if err := bal.UpdateClientConnState(ccState); err != nil { + t.Fatalf("UpdateClientConnState(%v) returned error: %v", ccState, err) + } + + wantAddrs := []resolver.Address{ + {Addr: "[0001:0001:0001:0001:0001:0001:0001:0001]:8080"}, + {Addr: "1.1.1.1:1111"}, + {Addr: "grpc.io:80"}, + {Addr: "[0002:0002:0002:0002:0002:0002:0002:0002]:2222"}, + {Addr: "2.2.2.2:2"}, + {Addr: "[0003:0003:0003:0003:0003:0003:0003:0003]:3333"}, + {Addr: "3.3.3.3:3"}, + {Addr: "[::FFFF:192.168.0.1]:2222"}, + } + + gotAddrs, err := subConnAddresses(ctx, cc, 8) + if err != nil { + t.Fatalf("%v", err) + } + if diff := cmp.Diff(wantAddrs, gotAddrs); diff != "" { + t.Errorf("SubConn creation order mismatch (-want +got):\n%s", diff) + } +} + +func (s) TestPickFirstLeaf_InterleavingUnknownPreffered(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewBalancerClientConn(t) + bal := balancer.Get(pickfirstleaf.Name).Build(cc, balancer.BuildOptions{}) + defer bal.Close() + ccState := balancer.ClientConnState{ + ResolverState: resolver.State{ + Endpoints: []resolver.Endpoint{ + {Addresses: []resolver.Address{{Addr: "grpc.io:80"}}}, // not an IP. 
+ {Addresses: []resolver.Address{{Addr: "1.1.1.1:1111"}}}, + {Addresses: []resolver.Address{{Addr: "2.2.2.2:2"}}}, + {Addresses: []resolver.Address{{Addr: "3.3.3.3:3"}}}, + {Addresses: []resolver.Address{{Addr: "[::FFFF:192.168.0.1]:2222"}}}, + {Addresses: []resolver.Address{{Addr: "[0001:0001:0001:0001:0001:0001:0001:0001]:8080"}}}, + {Addresses: []resolver.Address{{Addr: "[0002:0002:0002:0002:0002:0002:0002:0002]:8080"}}}, + {Addresses: []resolver.Address{{Addr: "[0003:0003:0003:0003:0003:0003:0003:0003]:3333"}}}, + {Addresses: []resolver.Address{{Addr: "example.com:80"}}}, // not an IP. + }, + }, + } + if err := bal.UpdateClientConnState(ccState); err != nil { + t.Fatalf("UpdateClientConnState(%v) returned error: %v", ccState, err) + } + + wantAddrs := []resolver.Address{ + {Addr: "grpc.io:80"}, + {Addr: "1.1.1.1:1111"}, + {Addr: "[0001:0001:0001:0001:0001:0001:0001:0001]:8080"}, + {Addr: "example.com:80"}, + {Addr: "2.2.2.2:2"}, + {Addr: "[0002:0002:0002:0002:0002:0002:0002:0002]:8080"}, + {Addr: "3.3.3.3:3"}, + {Addr: "[0003:0003:0003:0003:0003:0003:0003:0003]:3333"}, + {Addr: "[::FFFF:192.168.0.1]:2222"}, + } + + gotAddrs, err := subConnAddresses(ctx, cc, 9) + if err != nil { + t.Fatalf("%v", err) + } + if diff := cmp.Diff(wantAddrs, gotAddrs); diff != "" { + t.Errorf("SubConn creation order mismatch (-want +got):\n%s", diff) + } +} + +// subConnAddresses makes the pickfirst balancer create the requested number of +// SubConns by triggering transient failures. The function returns the +// addresses of the created SubConns. +func subConnAddresses(ctx context.Context, cc *testutils.BalancerClientConn, subConnCount int) ([]resolver.Address, error) { + addresses := []resolver.Address{} + for i := 0; i < subConnCount; i++ { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("test timed out after creating %d subchannels, want %d", i, subConnCount) + case sc := <-cc.NewSubConnCh: + if len(sc.Addresses) != 1 { + return nil, fmt.Errorf("new subchannel created with %d addresses, want 1", len(sc.Addresses)) + } + addresses = append(addresses, sc.Addresses[0]) + sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + sc.UpdateState(balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + }) + } + } + return addresses, nil +} + +// stateStoringBalancer stores the state of the SubConns being created. type stateStoringBalancer struct { balancer.Balancer mu sync.Mutex diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go index 423e8d25f2f2..d1b4292b486f 100644 --- a/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -38,6 +38,7 @@ type TestSubConn struct { ConnectCh chan struct{} stateListener func(balancer.SubConnState) connectCalled *grpcsync.Event + Addresses []resolver.Address } // NewTestSubConn returns a newly initialized SubConn. 
Typically, subconns @@ -132,6 +133,7 @@ func (tcc *BalancerClientConn) NewSubConn(a []resolver.Address, o balancer.NewSu ConnectCh: make(chan struct{}, 1), stateListener: o.StateListener, connectCalled: grpcsync.NewEvent(), + Addresses: a, } tcc.subConnIdx++ tcc.logger.Logf("testClientConn: NewSubConn(%v, %+v) => %s", a, o, sc) From b3393d95a74e059d5663c758ec002df156a4091f Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Wed, 6 Nov 2024 11:52:02 -0800 Subject: [PATCH 48/57] xdsclient: support fallback within an authority (#7701) --- xds/internal/xdsclient/authority.go | 218 +++++- xds/internal/xdsclient/client_new.go | 2 +- xds/internal/xdsclient/tests/fallback_test.go | 628 ++++++++++++++++++ 3 files changed, 827 insertions(+), 21 deletions(-) create mode 100644 xds/internal/xdsclient/tests/fallback_test.go diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index bd1662e8bca7..b9052ffe8d14 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -40,7 +40,7 @@ type resourceState struct { cache xdsresource.ResourceData // Most recent ACKed update for this resource. md xdsresource.UpdateMetadata // Metadata for the most recent update. deletionIgnored bool // True, if resource deletion was ignored for a prior update. - xdsChannelConfigs []*xdsChannelWithConfig // List of xdsChannels where this resource is subscribed. + xdsChannelConfigs map[*xdsChannelWithConfig]bool // Set of xdsChannels where this resource is subscribed. } // xdsChannelForADS is used to acquire a reference to an xdsChannel. This @@ -59,9 +59,9 @@ type xdsChannelForADS func(*bootstrap.ServerConfig, *authority) (*xdsChannel, fu // xdsChannelWithConfig is a struct that holds an xdsChannel and its associated // ServerConfig, along with a cleanup function to release the xdsChannel. type xdsChannelWithConfig struct { - xc *xdsChannel - sc *bootstrap.ServerConfig - cleanup func() + channel *xdsChannel + serverConfig *bootstrap.ServerConfig + cleanup func() } // authority provides the functionality required to communicate with a @@ -149,7 +149,7 @@ func newAuthority(args authorityBuildOptions) *authority { // first watch is registered, and channels to other server configurations // are created as needed to support fallback. for _, sc := range args.serverConfigs { - ret.xdsChannelConfigs = append(ret.xdsChannelConfigs, &xdsChannelWithConfig{sc: sc}) + ret.xdsChannelConfigs = append(ret.xdsChannelConfigs, &xdsChannelWithConfig{serverConfig: sc}) } return ret } @@ -200,8 +200,101 @@ func (a *authority) handleADSStreamFailure(serverConfig *bootstrap.ServerConfig, } } - // TODO(easwars-fallback): Trigger fallback here if conditions for fallback - // are met. + // Two conditions need to be met for fallback to be triggered: + // 1. There is a connectivity failure on the ADS stream, as described in + // gRFC A57. For us, this means that the ADS stream was closed before the + // first server response was received. We already checked that condition + // earlier in this method. + // 2. There is at least one watcher for a resource that is not cached. + // Cached resources include ones that + // - have been successfully received and can be used. + // - are considered non-existent according to xDS Protocol Specification. + if !a.watcherExistsForUncachedResource() { + if a.logger.V(2) { + a.logger.Infof("No watchers for uncached resources. 
Not triggering fallback") + } + return + } + a.fallbackToNextServerIfPossible(serverConfig) +} + +// serverIndexForConfig returns the index of the xdsChannelConfig that matches +// the provided ServerConfig. If no match is found, it returns the length of the +// xdsChannelConfigs slice, which represents the index of a non-existent config. +func (a *authority) serverIndexForConfig(sc *bootstrap.ServerConfig) int { + for i, cfg := range a.xdsChannelConfigs { + if cfg.serverConfig.Equal(sc) { + return i + } + } + return len(a.xdsChannelConfigs) +} + +// Determines the server to fallback to and triggers fallback to the same. If +// required, creates an xdsChannel to that server, and re-subscribes to all +// existing resources. +// +// Only executed in the context of a serializer callback. +func (a *authority) fallbackToNextServerIfPossible(failingServerConfig *bootstrap.ServerConfig) { + if a.logger.V(2) { + a.logger.Infof("Attempting to initiate fallback after failure from server %q", failingServerConfig) + } + + // The server to fallback to is the next server on the list. If the current + // server is the last server, then there is nothing that can be done. + currentServerIdx := a.serverIndexForConfig(failingServerConfig) + if currentServerIdx == len(a.xdsChannelConfigs) { + // This can never happen. + a.logger.Errorf("Received error from an unknown server: %s", failingServerConfig) + return + } + if currentServerIdx == len(a.xdsChannelConfigs)-1 { + if a.logger.V(2) { + a.logger.Infof("No more servers to fallback to") + } + return + } + fallbackServerIdx := currentServerIdx + 1 + fallbackChannel := a.xdsChannelConfigs[fallbackServerIdx] + + // If the server to fallback to already has an xdsChannel, it means that + // this connectivity error is from a server with a higher priority. There + // is not much we can do here. + if fallbackChannel.channel != nil { + if a.logger.V(2) { + a.logger.Infof("Channel to the next server in the list %q already exists", fallbackChannel.serverConfig) + } + return + } + + // Create an xdsChannel for the fallback server. + if a.logger.V(2) { + a.logger.Infof("Initiating fallback to server %s", fallbackChannel.serverConfig) + } + xc, cleanup, err := a.getChannelForADS(fallbackChannel.serverConfig, a) + if err != nil { + a.logger.Errorf("Failed to create XDS channel: %v", err) + return + } + fallbackChannel.channel = xc + fallbackChannel.cleanup = cleanup + a.activeXDSChannel = fallbackChannel + + // Subscribe to all existing resources from the new management server. + for typ, resources := range a.resources { + for name, state := range resources { + if a.logger.V(2) { + a.logger.Infof("Resubscribing to resource of type %q and name %q", typ.TypeName(), name) + } + xc.subscribe(typ, name) + + // Add the fallback channel to the list of xdsChannels from which + // this resource has been requested from. Retain the cached resource + // and the set of existing watchers (and other metadata fields) in + // the resource state. + state.xdsChannelConfigs[fallbackChannel] = true + } + } } // adsResourceUpdate is called to notify the authority about a resource update @@ -218,13 +311,15 @@ func (a *authority) adsResourceUpdate(serverConfig *bootstrap.ServerConfig, rTyp // handleADSResourceUpdate processes an update from the xDS client, updating the // resource cache and notifying any registered watchers of the update. // +// If the update is received from a higher priority xdsChannel that was +// previously down, we revert to it and close all lower priority xdsChannels. 
+// // Once the update has been processed by all watchers, the authority is expected // to invoke the onDone callback. // // Only executed in the context of a serializer callback. func (a *authority) handleADSResourceUpdate(serverConfig *bootstrap.ServerConfig, rType xdsresource.Type, updates map[string]ads.DataAndErrTuple, md xdsresource.UpdateMetadata, onDone func()) { - // TODO(easwars-fallback): Trigger reverting to a higher priority server if - // the update is from one. + a.handleRevertingToPrimaryOnUpdate(serverConfig) // We build a list of callback funcs to invoke, and invoke them at the end // of this method instead of inline (when handling the update for a @@ -416,6 +511,74 @@ func (a *authority) handleADSResourceDoesNotExist(rType xdsresource.Type, resour } } +// handleRevertingToPrimaryOnUpdate is called when a resource update is received +// from the xDS client. +// +// If the update is from the currently active server, nothing is done. Else, all +// lower priority servers are closed and the active server is reverted to the +// highest priority server that sent the update. +// +// This method is only executed in the context of a serializer callback. +func (a *authority) handleRevertingToPrimaryOnUpdate(serverConfig *bootstrap.ServerConfig) { + if a.activeXDSChannel != nil && a.activeXDSChannel.serverConfig.Equal(serverConfig) { + // If the resource update is from the current active server, nothing + // needs to be done from fallback point of view. + return + } + + if a.logger.V(2) { + a.logger.Infof("Received update from non-active server %q", serverConfig) + } + + // If the resource update is not from the current active server, it means + // that we have received an update from a higher priority server and we need + // to revert back to it. This method guarantees that when an update is + // received from a server, all lower priority servers are closed. + serverIdx := a.serverIndexForConfig(serverConfig) + if serverIdx == len(a.xdsChannelConfigs) { + // This can never happen. + a.logger.Errorf("Received update from an unknown server: %s", serverConfig) + return + } + a.activeXDSChannel = a.xdsChannelConfigs[serverIdx] + + // Close all lower priority channels. + // + // But before closing any channel, we need to unsubscribe from any resources + // that were subscribed to on this channel. Resources could be subscribed to + // from multiple channels as we fallback to lower priority servers. But when + // a higher priority one comes back up, we need to unsubscribe from all + // lower priority ones before releasing the reference to them. + for i := serverIdx + 1; i < len(a.xdsChannelConfigs); i++ { + cfg := a.xdsChannelConfigs[i] + + for rType, rState := range a.resources { + for resourceName, state := range rState { + for xcc := range state.xdsChannelConfigs { + if xcc != cfg { + continue + } + // If the current resource is subscribed to on this channel, + // unsubscribe, and remove the channel from the list of + // channels that this resource is subscribed to. + xcc.channel.unsubscribe(rType, resourceName) + delete(state.xdsChannelConfigs, xcc) + } + } + } + + // Release the reference to the channel. + if cfg.cleanup != nil { + if a.logger.V(2) { + a.logger.Infof("Closing lower priority server %q", cfg.serverConfig) + } + cfg.cleanup() + cfg.cleanup = nil + } + cfg.channel = nil + } +} + // watchResource registers a new watcher for the specified resource type and // name. It returns a function that can be called to cancel the watch. 
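// For context on the ordering used by fallbackToNextServerIfPossible and
// handleRevertingToPrimaryOnUpdate above: an authority's xdsChannelConfigs
// are built in the order the server configs are passed in, so server priority
// follows the order of the xDS server list in the bootstrap configuration. A
// minimal sketch using the testing helper exercised later in this patch; the
// URIs and node ID are placeholders:

func sketchTwoServerBootstrap() ([]byte, error) {
    return bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{
        Servers: []byte(`[
          {"server_uri": "primary.example.com:443",  "channel_creds": [{"type": "insecure"}]},
          {"server_uri": "fallback.example.com:443", "channel_creds": [{"type": "insecure"}]}
        ]`),
        Node: []byte(`{"id": "example-node"}`),
    })
}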
// @@ -462,10 +625,10 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w state = &resourceState{ watchers: make(map[xdsresource.ResourceWatcher]bool), md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, - xdsChannelConfigs: []*xdsChannelWithConfig{xdsChannel}, + xdsChannelConfigs: map[*xdsChannelWithConfig]bool{xdsChannel: true}, } resources[resourceName] = state - xdsChannel.xc.subscribe(rType, resourceName) + xdsChannel.channel.subscribe(rType, resourceName) } // Always add the new watcher to the set of watchers. state.watchers[watcher] = true @@ -516,8 +679,8 @@ func (a *authority) unwatchResource(rType xdsresource.Type, resourceName string, if a.logger.V(2) { a.logger.Infof("Removing last watch for resource name %q", resourceName) } - for _, xc := range state.xdsChannelConfigs { - xc.xc.unsubscribe(rType, resourceName) + for xcc := range state.xdsChannelConfigs { + xcc.channel.unsubscribe(rType, resourceName) } delete(resources, resourceName) @@ -553,13 +716,13 @@ func (a *authority) xdsChannelToUse() *xdsChannelWithConfig { return a.activeXDSChannel } - sc := a.xdsChannelConfigs[0].sc + sc := a.xdsChannelConfigs[0].serverConfig xc, cleanup, err := a.getChannelForADS(sc, a) if err != nil { a.logger.Warningf("Failed to create xDS channel: %v", err) return nil } - a.xdsChannelConfigs[0].xc = xc + a.xdsChannelConfigs[0].channel = xc a.xdsChannelConfigs[0].cleanup = cleanup a.activeXDSChannel = a.xdsChannelConfigs[0] return a.activeXDSChannel @@ -570,16 +733,31 @@ func (a *authority) xdsChannelToUse() *xdsChannelWithConfig { // // Only executed in the context of a serializer callback. func (a *authority) closeXDSChannels() { - for _, xc := range a.xdsChannelConfigs { - if xc.cleanup != nil { - xc.cleanup() - xc.cleanup = nil + for _, xcc := range a.xdsChannelConfigs { + if xcc.cleanup != nil { + xcc.cleanup() + xcc.cleanup = nil } - xc.xc = nil + xcc.channel = nil } a.activeXDSChannel = nil } +// watcherExistsForUncachedResource returns true if there is at least one +// watcher for a resource that has not yet been cached. +// +// Only executed in the context of a serializer callback. +func (a *authority) watcherExistsForUncachedResource() bool { + for _, resourceStates := range a.resources { + for _, state := range resourceStates { + if state.md.Status == xdsresource.ServiceStatusRequested { + return true + } + } + } + return false +} + // dumpResources returns a dump of the resource configuration cached by this // authority, for CSDS purposes. func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig { diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go index 839cf23833b3..82e549fda53a 100644 --- a/xds/internal/xdsclient/client_new.go +++ b/xds/internal/xdsclient/client_new.go @@ -108,6 +108,7 @@ func newClientImpl(config *bootstrap.Config, watchExpiryTimeout, idleChannelExpi type OptionsForTesting struct { // Name is a unique name for this xDS client. Name string + // Contents contain a JSON representation of the bootstrap configuration to // be used when creating the xDS client. 
Contents []byte @@ -180,7 +181,6 @@ func GetForTesting(name string) (XDSClient, func(), error) { func init() { internal.TriggerXDSResourceNotFoundForTesting = triggerXDSResourceNotFoundForTesting xdsclientinternal.ResourceWatchStateForTesting = resourceWatchStateForTesting - } func triggerXDSResourceNotFoundForTesting(client XDSClient, typ xdsresource.Type, name string) error { diff --git a/xds/internal/xdsclient/tests/fallback_test.go b/xds/internal/xdsclient/tests/fallback_test.go new file mode 100644 index 000000000000..514945f833d0 --- /dev/null +++ b/xds/internal/xdsclient/tests/fallback_test.go @@ -0,0 +1,628 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient_test + +import ( + "context" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/internal/xds/bootstrap" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// Give the fallback tests additional time to complete because they need to +// first identify failed connections before establishing new ones. +const defaultFallbackTestTimeout = 2 * defaultTestTimeout + +func waitForRPCsToReachBackend(ctx context.Context, client testgrpc.TestServiceClient, backend string) error { + var lastErr error + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + var peer peer.Peer + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + lastErr = err + continue + } + // Veirfy the peer when the RPC succeeds. + if peer.Addr.String() == backend { + break + } + } + if ctx.Err() != nil { + return fmt.Errorf("timeout when waiting for RPCs to reach expected backend. Last error: %v", lastErr) + } + return nil +} + +// Tests fallback on startup where the xDS client is unable to establish a +// connection to the primary server. The test verifies that the xDS client falls +// back to the secondary server, and when the primary comes back up, it reverts +// to it. 
The test also verifies that when all requested resources are cached +// from the primary, fallback is not triggered when the connection goes down. +func (s) TestFallback_OnStartup(t *testing.T) { + // Enable fallback env var. + origFallbackEnv := envconfig.XDSFallbackSupport + envconfig.XDSFallbackSupport = true + defer func() { envconfig.XDSFallbackSupport = origFallbackEnv }() + + ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTestTimeout) + defer cancel() + + // Create two listeners for the two management servers. The test can + // start/stop these listeners and can also get notified when the listener + // receives a connection request. + primaryWrappedLis := testutils.NewListenerWrapper(t, nil) + primaryLis := testutils.NewRestartableListener(primaryWrappedLis) + fallbackWrappedLis := testutils.NewListenerWrapper(t, nil) + fallbackLis := testutils.NewRestartableListener(fallbackWrappedLis) + + // Start two management servers, primary and fallback, with the above + // listeners. + primaryManagementServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{Listener: primaryLis}) + fallbackManagementServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{Listener: fallbackLis}) + + // Start two test service backends. + backend1 := stubserver.StartTestService(t, nil) + defer backend1.Stop() + backend2 := stubserver.StartTestService(t, nil) + defer backend2.Stop() + + // Configure xDS resource on the primary management server, with a cluster + // resource that contains an endpoint for backend1. + nodeID := uuid.New().String() + const serviceName = "my-service-fallback-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: testutils.ParsePort(t, backend1.Address), + SecLevel: e2e.SecurityLevelNone, + }) + if err := primaryManagementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Configure xDS resource on the secondary management server, with a cluster + // resource that contains an endpoint for backend2. Only the listener + // resource has the same name on both servers. + fallbackRouteConfigName := "fallback-route-" + serviceName + fallbackClusterName := "fallback-cluster-" + serviceName + fallbackEndpointsName := "fallback-endpoints-" + serviceName + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, fallbackRouteConfigName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(fallbackRouteConfigName, serviceName, fallbackClusterName)}, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(fallbackClusterName, fallbackEndpointsName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(fallbackEndpointsName, "localhost", []uint32{testutils.ParsePort(t, backend2.Address)})}, + } + if err := fallbackManagementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Shut both management servers down before starting the gRPC client to + // trigger fallback on startup. + primaryLis.Stop() + fallbackLis.Stop() + + // Generate bootstrap configuration with the above two servers. 
+	bootstrapContents, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{
+		Servers: []byte(fmt.Sprintf(`[
+		{
+			"server_uri": %q,
+			"channel_creds": [{"type": "insecure"}]
+		},
+		{
+			"server_uri": %q,
+			"channel_creds": [{"type": "insecure"}]
+		}]`, primaryManagementServer.Address, fallbackManagementServer.Address)),
+		Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)),
+	})
+	if err != nil {
+		t.Fatalf("Failed to create bootstrap file: %v", err)
+	}
+
+	// Create an xDS client with the above bootstrap configuration and a short
+	// idle channel expiry timeout. This ensures that connections to lower
+	// priority servers get closed quickly, for the test to verify.
+	xdsC, close, err := xdsclient.NewForTesting(xdsclient.OptionsForTesting{
+		Name:                     t.Name(),
+		Contents:                 bootstrapContents,
+		IdleChannelExpiryTimeout: defaultTestIdleChannelExpiryTimeout,
+	})
+	if err != nil {
+		t.Fatalf("Failed to create xDS client: %v", err)
+	}
+	defer close()
+
+	// Get the xDS resolver to use the above xDS client.
+	resolverBuilder := internal.NewXDSResolverWithClientForTesting.(func(xdsclient.XDSClient) (resolver.Builder, error))
+	resolver, err := resolverBuilder(xdsC)
+	if err != nil {
+		t.Fatalf("Failed to create xDS resolver for testing: %v", err)
+	}
+
+	// Start a gRPC client that uses the above xDS resolver.
+	cc, err := grpc.NewClient(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver))
+	if err != nil {
+		t.Fatalf("Failed to create gRPC client: %v", err)
+	}
+	defer cc.Close()
+	cc.Connect()
+
+	// Ensure that a connection is attempted to the primary.
+	if _, err := primaryWrappedLis.NewConnCh.Receive(ctx); err != nil {
+		t.Fatalf("Failure when waiting for a connection to be opened to the primary management server: %v", err)
+	}
+
+	// Ensure that a connection is attempted to the fallback.
+	if _, err := fallbackWrappedLis.NewConnCh.Receive(ctx); err != nil {
+		t.Fatalf("Failure when waiting for a connection to be opened to the fallback management server: %v", err)
+	}
+
+	// Make an RPC with a shortish deadline and expect it to fail.
+	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
+	defer sCancel()
+	client := testgrpc.NewTestServiceClient(cc)
+	if _, err := client.EmptyCall(sCtx, &testpb.Empty{}, grpc.WaitForReady(true)); err == nil || status.Code(err) != codes.DeadlineExceeded {
+		t.Fatalf("EmptyCall() = %v, want DeadlineExceeded", err)
+	}
+
+	// Start the fallback server. Ensure that an RPC can succeed, and that it
+	// reaches backend2.
+	fallbackLis.Restart()
+	if err := waitForRPCsToReachBackend(ctx, client, backend2.Address); err != nil {
+		t.Fatal(err)
+	}
+
+	// Start the primary server. It can take a while before the xDS client
+	// notices this, since the ADS stream implementation uses a backoff before
+	// retrying the stream.
+	primaryLis.Restart()
+
+	// Wait for the connection to the secondary to be closed and ensure that an
+	// RPC can succeed, and that it reaches backend1.
+ c, err := fallbackWrappedLis.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failure when retrieving the most recent connection to the fallback management server: %v", err) + } + conn := c.(*testutils.ConnWrapper) + if _, err := conn.CloseCh.Receive(ctx); err != nil { + t.Fatalf("Connection to fallback server not closed once primary becomes ready: %v", err) + } + if err := waitForRPCsToReachBackend(ctx, client, backend1.Address); err != nil { + t.Fatal(err) + } + + // Stop the primary servers. Since all xDS resources were received from the + // primary (and RPCs were succeeding to the clusters returned by the + // primary), we will not trigger fallback. + primaryLis.Stop() + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := fallbackWrappedLis.NewConnCh.Receive(sCtx); err == nil { + t.Fatalf("Fallback attempted when not expected to. There are no uncached resources from the primary server at this point.") + } + + // Ensure that RPCs still succeed, and that they use the configuration + // received from the primary. + if err := waitForRPCsToReachBackend(ctx, client, backend1.Address); err != nil { + t.Fatal(err) + } +} + +// Tests fallback when the primary management server fails during an update. +func (s) TestFallback_MidUpdate(t *testing.T) { + // Enable fallback env var. + origFallbackEnv := envconfig.XDSFallbackSupport + envconfig.XDSFallbackSupport = true + defer func() { envconfig.XDSFallbackSupport = origFallbackEnv }() + + ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTestTimeout) + defer cancel() + + // Create two listeners for the two management servers. The test can + // start/stop these listeners and can also get notified when the listener + // receives a connection request. + primaryWrappedLis := testutils.NewListenerWrapper(t, nil) + primaryLis := testutils.NewRestartableListener(primaryWrappedLis) + fallbackWrappedLis := testutils.NewListenerWrapper(t, nil) + fallbackLis := testutils.NewRestartableListener(fallbackWrappedLis) + + // This boolean helps with triggering fallback mid update. When this boolean + // is set and the below defined cluster resource is requested, the primary + // management server shuts down the connection, forcing the client to + // fallback to the secondary server. + var closeConnOnMidUpdateClusterResource atomic.Bool + const ( + serviceName = "my-service-fallback-xds" + routeConfigName = "route-" + serviceName + clusterName = "cluster-" + serviceName + endpointsName = "endpoints-" + serviceName + midUpdateRouteConfigName = "mid-update-route-" + serviceName + midUpdateClusterName = "mid-update-cluster-" + serviceName + midUpdateEndpointsName = "mid-update-endpoints-" + serviceName + ) + + // Start two management servers, primary and fallback, with the above + // listeners. 
+ primaryManagementServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{ + Listener: primaryLis, + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + if closeConnOnMidUpdateClusterResource.Load() == false { + return nil + } + if req.GetTypeUrl() != version.V3ClusterURL { + return nil + } + for _, name := range req.GetResourceNames() { + if name == midUpdateClusterName { + primaryLis.Stop() + return fmt.Errorf("closing ADS stream because %q resource was requested", midUpdateClusterName) + } + } + return nil + }, + AllowResourceSubset: true, + }) + fallbackManagementServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{Listener: fallbackLis}) + + // Start three test service backends. + backend1 := stubserver.StartTestService(t, nil) + defer backend1.Stop() + backend2 := stubserver.StartTestService(t, nil) + defer backend2.Stop() + backend3 := stubserver.StartTestService(t, nil) + defer backend3.Stop() + + // Configure xDS resource on the primary management server, with a cluster + // resource that contains an endpoint for backend1. + nodeID := uuid.New().String() + primaryResources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, routeConfigName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(routeConfigName, serviceName, clusterName)}, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, endpointsName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(endpointsName, "localhost", []uint32{testutils.ParsePort(t, backend1.Address)})}, + } + if err := primaryManagementServer.Update(ctx, primaryResources); err != nil { + t.Fatal(err) + } + + // Configure xDS resource on the secondary management server, with a cluster + // resource that contains an endpoint for backend2. Only the listener + // resource has the same name on both servers. + const ( + fallbackRouteConfigName = "fallback-route-" + serviceName + fallbackClusterName = "fallback-cluster-" + serviceName + fallbackEndpointsName = "fallback-endpoints-" + serviceName + ) + fallbackResources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, fallbackRouteConfigName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(fallbackRouteConfigName, serviceName, fallbackClusterName)}, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(fallbackClusterName, fallbackEndpointsName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(fallbackEndpointsName, "localhost", []uint32{testutils.ParsePort(t, backend2.Address)})}, + } + if err := fallbackManagementServer.Update(ctx, fallbackResources); err != nil { + t.Fatal(err) + } + + // Generate bootstrap configuration with the above two servers. + bootstrapContents, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{ + Servers: []byte(fmt.Sprintf(`[ + { + "server_uri": %q, + "channel_creds": [{"type": "insecure"}] + }, + { + "server_uri": %q, + "channel_creds": [{"type": "insecure"}] + }]`, primaryManagementServer.Address, fallbackManagementServer.Address)), + Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)), + }) + if err != nil { + t.Fatalf("Failed to create bootstrap file: %v", err) + } + + // Create an xDS client with the above bootstrap configuration and a short + // idle channel expiry timeout. 
This ensures that connections to lower + // priority servers get closed quickly, for the test to verify. + xdsC, close, err := xdsclient.NewForTesting(xdsclient.OptionsForTesting{ + Name: t.Name(), + Contents: bootstrapContents, + IdleChannelExpiryTimeout: defaultTestIdleChannelExpiryTimeout, + }) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Get the xDS resolver to use the above xDS client. + resolverBuilder := internal.NewXDSResolverWithClientForTesting.(func(xdsclient.XDSClient) (resolver.Builder, error)) + resolver, err := resolverBuilder(xdsC) + if err != nil { + t.Fatalf("Failed to create xDS resolver for testing: %v", err) + } + + // Start a gRPC client that uses the above xDS resolver. + cc, err := grpc.NewClient(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("Failed to create gRPC client: %v", err) + } + defer cc.Close() + cc.Connect() + + // Ensure that RPCs reach the cluster specified by the primary server and + // that no connection is attempted to the fallback server. + client := testgrpc.NewTestServiceClient(cc) + if err := waitForRPCsToReachBackend(ctx, client, backend1.Address); err != nil { + t.Fatal(err) + } + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := fallbackWrappedLis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("Connection attempt made to fallback server when none expected: %v", err) + } + + // Instruct the primary server to close the connection if below defined + // cluster resource is requested. + closeConnOnMidUpdateClusterResource.Store(true) + + // Update the listener resource on the primary server to point to a new + // route configuration that points to a new cluster that points to a new + // endpoints resource that contains backend3. + primaryResources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, midUpdateRouteConfigName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(midUpdateRouteConfigName, serviceName, midUpdateClusterName)}, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(midUpdateClusterName, midUpdateEndpointsName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(midUpdateEndpointsName, "localhost", []uint32{testutils.ParsePort(t, backend3.Address)})}, + } + if err := primaryManagementServer.Update(ctx, primaryResources); err != nil { + t.Fatal(err) + } + + // Ensure that a connection is attempted to the fallback (because both + // conditions mentioned for fallback in A71 are satisfied: connectivity + // failure and a watcher for an uncached resource), and that RPCs are + // routed to the cluster returned by the fallback server. + c, err := fallbackWrappedLis.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failure when waiting for a connection to be opened to the fallback management server: %v", err) + } + fallbackConn := c.(*testutils.ConnWrapper) + if err := waitForRPCsToReachBackend(ctx, client, backend2.Address); err != nil { + t.Fatal(err) + } + + // Set the primary management server to not close the connection anymore if + // the mid-update cluster resource is requested, and get it to start serving + // again. 
+ closeConnOnMidUpdateClusterResource.Store(false) + primaryLis.Restart() + + // A new snapshot, with the same resources, is pushed to the management + // server to get it to respond for already requested resource names. + if err := primaryManagementServer.Update(ctx, primaryResources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs reach the backend pointed to by the new cluster. + if err := waitForRPCsToReachBackend(ctx, client, backend3.Address); err != nil { + t.Fatal(err) + } + + // Wait for the connection to the secondary to be closed since we have + // reverted back to the primary. + if _, err := fallbackConn.CloseCh.Receive(ctx); err != nil { + t.Fatalf("Connection to fallback server not closed once primary becomes ready: %v", err) + } +} + +// Tests fallback when the primary management server fails during startup. +func (s) TestFallback_MidStartup(t *testing.T) { + // Enable fallback env var. + origFallbackEnv := envconfig.XDSFallbackSupport + envconfig.XDSFallbackSupport = true + defer func() { envconfig.XDSFallbackSupport = origFallbackEnv }() + + ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTestTimeout) + defer cancel() + + // Create two listeners for the two management servers. The test can + // start/stop these listeners and can also get notified when the listener + // receives a connection request. + primaryWrappedLis := testutils.NewListenerWrapper(t, nil) + primaryLis := testutils.NewRestartableListener(primaryWrappedLis) + fallbackWrappedLis := testutils.NewListenerWrapper(t, nil) + fallbackLis := testutils.NewRestartableListener(fallbackWrappedLis) + + // This boolean helps with triggering fallback during startup. When this + // boolean is set and a cluster resource is requested, the primary + // management server shuts down the connection, forcing the client to + // fallback to the secondary server. + var closeConnOnClusterResource atomic.Bool + closeConnOnClusterResource.Store(true) + + // Start two management servers, primary and fallback, with the above + // listeners. + primaryManagementServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{ + Listener: primaryLis, + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + if closeConnOnClusterResource.Load() == false { + return nil + } + if req.GetTypeUrl() != version.V3ClusterURL { + return nil + } + primaryLis.Stop() + return fmt.Errorf("closing ADS stream because cluster resource was requested") + }, + AllowResourceSubset: true, + }) + fallbackManagementServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{Listener: fallbackLis}) + + // Start two test service backends. + backend1 := stubserver.StartTestService(t, nil) + defer backend1.Stop() + backend2 := stubserver.StartTestService(t, nil) + defer backend2.Stop() + + // Configure xDS resource on the primary management server, with a cluster + // resource that contains an endpoint for backend1. + nodeID := uuid.New().String() + const serviceName = "my-service-fallback-xds" + primaryResources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: testutils.ParsePort(t, backend1.Address), + SecLevel: e2e.SecurityLevelNone, + }) + if err := primaryManagementServer.Update(ctx, primaryResources); err != nil { + t.Fatal(err) + } + + // Configure xDS resource on the secondary management server, with a cluster + // resource that contains an endpoint for backend2. 
Only the listener + // resource has the same name on both servers. + fallbackRouteConfigName := "fallback-route-" + serviceName + fallbackClusterName := "fallback-cluster-" + serviceName + fallbackEndpointsName := "fallback-endpoints-" + serviceName + fallbackResources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, fallbackRouteConfigName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(fallbackRouteConfigName, serviceName, fallbackClusterName)}, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(fallbackClusterName, fallbackEndpointsName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(fallbackEndpointsName, "localhost", []uint32{testutils.ParsePort(t, backend2.Address)})}, + } + if err := fallbackManagementServer.Update(ctx, fallbackResources); err != nil { + t.Fatal(err) + } + + // Generate bootstrap configuration with the above two servers. + bootstrapContents, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{ + Servers: []byte(fmt.Sprintf(`[ + { + "server_uri": %q, + "channel_creds": [{"type": "insecure"}] + }, + { + "server_uri": %q, + "channel_creds": [{"type": "insecure"}] + }]`, primaryManagementServer.Address, fallbackManagementServer.Address)), + Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)), + }) + if err != nil { + t.Fatalf("Failed to create bootstrap file: %v", err) + } + + // Create an xDS client with the above bootstrap configuration and a short + // idle channel expiry timeout. This ensures that connections to lower + // priority servers get closed quickly, for the test to verify. + xdsC, close, err := xdsclient.NewForTesting(xdsclient.OptionsForTesting{ + Name: t.Name(), + Contents: bootstrapContents, + IdleChannelExpiryTimeout: defaultTestIdleChannelExpiryTimeout, + }) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Get the xDS resolver to use the above xDS client. + resolverBuilder := internal.NewXDSResolverWithClientForTesting.(func(xdsclient.XDSClient) (resolver.Builder, error)) + resolver, err := resolverBuilder(xdsC) + if err != nil { + t.Fatalf("Failed to create xDS resolver for testing: %v", err) + } + + // Start a gRPC client that uses the above xDS resolver. + cc, err := grpc.NewClient(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("Failed to create gRPC client: %v", err) + } + defer cc.Close() + cc.Connect() + + // Ensure that a connection is attempted to the primary. + if _, err := primaryWrappedLis.NewConnCh.Receive(ctx); err != nil { + t.Fatalf("Failure when waiting for a connection to be opened to the primary management server: %v", err) + } + + // Ensure that a connection is attempted to the fallback. + c, err := fallbackWrappedLis.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failure when waiting for a connection to be opened to the secondary management server: %v", err) + } + fallbackConn := c.(*testutils.ConnWrapper) + + // Ensure that RPCs are routed to the cluster returned by the fallback + // management server. + client := testgrpc.NewTestServiceClient(cc) + if err := waitForRPCsToReachBackend(ctx, client, backend2.Address); err != nil { + t.Fatal(err) + } + + // Get the primary management server to no longer close the connection when + // the cluster resource is requested. 
+ closeConnOnClusterResource.Store(false) + primaryLis.Restart() + + // A new snapshot, with the same resources, is pushed to the management + // server to get it to respond for already requested resource names. + if err := primaryManagementServer.Update(ctx, primaryResources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs are routed to the cluster returned by the primary + // management server. + if err := waitForRPCsToReachBackend(ctx, client, backend1.Address); err != nil { + t.Fatal(err) + } + + // Wait for the connection to the secondary to be closed since we have + // reverted back to the primary. + if _, err := fallbackConn.CloseCh.Receive(ctx); err != nil { + t.Fatalf("Connection to fallback server not closed once primary becomes ready: %v", err) + } +} From 5b40f07f8e0b4fbc701d49a23fa21ae6656915c8 Mon Sep 17 00:00:00 2001 From: Easwar Swaminathan Date: Thu, 7 Nov 2024 14:00:14 -0800 Subject: [PATCH 49/57] xdsclient: fix flaky test TestServeAndCloseDoNotRace (#7814) --- xds/internal/resolver/xds_resolver_test.go | 1 + xds/internal/xdsclient/authority.go | 11 ++++++++--- xds/internal/xdsclient/client_new.go | 8 ++++++-- xds/server_test.go | 19 +++++++++++++++---- 4 files changed, 30 insertions(+), 9 deletions(-) diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 77e8c47e6cd5..8a1a7427068e 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -71,6 +71,7 @@ import ( // build fails as well. func (s) TestResolverBuilder_ClientCreationFails_NoBootstrap(t *testing.T) { // Build an xDS resolver without specifying bootstrap env vars. + bootstrap.UnsetFallbackBootstrapConfigForTesting() builder := resolver.Get(xdsresolver.Scheme) if builder == nil { t.Fatalf("Scheme %q is not registered", xdsresolver.Scheme) diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go index b9052ffe8d14..27abb64ef6d5 100644 --- a/xds/internal/xdsclient/authority.go +++ b/xds/internal/xdsclient/authority.go @@ -594,7 +594,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w cleanup := func() {} done := make(chan struct{}) - a.xdsClientSerializer.TrySchedule(func(context.Context) { + a.xdsClientSerializer.ScheduleOr(func(context.Context) { defer close(done) if a.logger.V(2) { @@ -642,6 +642,11 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w a.watcherCallbackSerializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) }) } cleanup = a.unwatchResource(rType, resourceName, watcher) + }, func() { + if a.logger.V(2) { + a.logger.Infof("Failed to schedule a watch for type %q, resource name %q, because the xDS client is closed", rType.TypeName(), resourceName) + } + close(done) }) <-done return cleanup @@ -764,10 +769,10 @@ func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig var ret []*v3statuspb.ClientConfig_GenericXdsConfig done := make(chan struct{}) - a.xdsClientSerializer.TrySchedule(func(context.Context) { + a.xdsClientSerializer.ScheduleOr(func(context.Context) { defer close(done) ret = a.resourceConfig() - }) + }, func() { close(done) }) <-done return ret } diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go index 82e549fda53a..fd5a8d57ea2d 100644 --- a/xds/internal/xdsclient/client_new.go +++ b/xds/internal/xdsclient/client_new.go @@ -129,6 +129,11 @@ type OptionsForTesting struct { // NewForTesting returns 
an xDS client configured with the provided options. // +// Sets the fallback bootstrap configuration to the contents in the +// opts.Contents field. This value persists for the life of the test binary. So, +// tests that want this value to be empty should call +// bootstrap.UnsetFallbackBootstrapConfigForTesting to ensure the same. +// // The second return value represents a close function which the caller is // expected to invoke once they are done using the client. It is safe for the // caller to invoke this close function multiple times. @@ -153,8 +158,7 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil { return nil, nil, err } - client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.IdleChannelExpiryTimeout, opts.StreamBackoffAfterFailure) - return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err + return newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.IdleChannelExpiryTimeout, opts.StreamBackoffAfterFailure) } // GetForTesting returns an xDS client created earlier using the given name. diff --git a/xds/server_test.go b/xds/server_test.go index 7a8dcbcc6627..405ded0a093e 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -176,9 +176,14 @@ func (s) TestNewServer_Failure(t *testing.T) { wantErr string }{ { - desc: "bootstrap env var not set", - serverOpts: []grpc.ServerOption{grpc.Creds(xdsCreds)}, - wantErr: "failed to get xDS bootstrap config", + desc: "bootstrap env var not set", + serverOpts: func() []grpc.ServerOption { + // Ensure that any fallback bootstrap configuration setup by + // previous tests is cleared. + bootstrap.UnsetFallbackBootstrapConfigForTesting() + return []grpc.ServerOption{grpc.Creds(xdsCreds)} + }(), + wantErr: "failed to get xDS bootstrap config", }, { desc: "empty bootstrap config", @@ -696,10 +701,16 @@ func (s) TestServeAndCloseDoNotRace(t *testing.T) { t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } + // Generate bootstrap contents up front for all servers, and clear the + // fallback bootstrap configuration that gets set when a server is created + // with the BootstrapContentsForTesting() server option. 
+ bootstrapContents := generateBootstrapContents(t, uuid.NewString(), nonExistentManagementServer) + defer bootstrap.UnsetFallbackBootstrapConfigForTesting() + wg := sync.WaitGroup{} wg.Add(200) for i := 0; i < 100; i++ { - server, err := NewGRPCServer(BootstrapContentsForTesting(generateBootstrapContents(t, uuid.NewString(), nonExistentManagementServer))) + server, err := NewGRPCServer(BootstrapContentsForTesting(bootstrapContents)) if err != nil { t.Fatalf("Failed to create an xDS enabled gRPC server: %v", err) } From 74738cf4aa6c6953911b2d95b4bc95e876557e77 Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Fri, 8 Nov 2024 11:27:35 +0530 Subject: [PATCH 50/57] grpc: Remove health check func dial option used for testing (#7820) --- clientconn.go | 4 +-- dialoptions.go | 16 ---------- internal/internal.go | 2 -- test/healthcheck_test.go | 65 +++++++++++++++++++--------------------- 4 files changed, 33 insertions(+), 54 deletions(-) diff --git a/clientconn.go b/clientconn.go index 091ccd952a66..4f57b55434f9 100644 --- a/clientconn.go +++ b/clientconn.go @@ -1445,7 +1445,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := ac.cc.dopts.healthCheckFunc + healthCheckFunc := internal.HealthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. // @@ -1477,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") diff --git a/dialoptions.go b/dialoptions.go index 518692c3afb8..7494ae591f16 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -87,7 +87,6 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool - healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string @@ -445,10 +444,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -662,16 +657,6 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. 
-func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ @@ -682,7 +667,6 @@ func defaultDialOptions() dialOptions { BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, diff --git a/internal/internal.go b/internal/internal.go index 88900fa9bbc6..7fbfaacde9bf 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -29,8 +29,6 @@ import ( ) var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index 6f0665863cc4..424682d09625 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -46,8 +46,6 @@ import ( testpb "google.golang.org/grpc/interop/grpc_testing" ) -var testHealthCheckFunc = internal.HealthCheckFunc - func newTestHealthServer() *testHealthServer { return newTestHealthServerWithWatchFunc(defaultWatchFunc) } @@ -119,14 +117,22 @@ func (s *testHealthServer) SetServingStatus(service string, status healthpb.Heal s.mu.Unlock() } -func setupHealthCheckWrapper() (hcEnterChan chan struct{}, hcExitChan chan struct{}, wrapper internal.HealthChecker) { +func setupHealthCheckWrapper(t *testing.T) (hcEnterChan chan struct{}, hcExitChan chan struct{}) { + t.Helper() + hcEnterChan = make(chan struct{}) hcExitChan = make(chan struct{}) - wrapper = func(ctx context.Context, newStream func(string) (any, error), update func(connectivity.State, error), service string) error { + origHealthCheckFn := internal.HealthCheckFunc + internal.HealthCheckFunc = func(ctx context.Context, newStream func(string) (any, error), update func(connectivity.State, error), service string) error { close(hcEnterChan) defer close(hcExitChan) - return testHealthCheckFunc(ctx, newStream, update, service) + return origHealthCheckFn(ctx, newStream, update, service) } + + t.Cleanup(func() { + internal.HealthCheckFunc = origHealthCheckFn + }) + return } @@ -153,9 +159,8 @@ func setupServer(t *testing.T, watchFunc healthWatchFunc) (*grpc.Server, net.Lis } type clientConfig struct { - balancerName string - testHealthCheckFuncWrapper internal.HealthChecker - extraDialOption []grpc.DialOption + balancerName string + extraDialOption []grpc.DialOption } func setupClient(t *testing.T, c *clientConfig) (*grpc.ClientConn, *manual.Resolver) { @@ -170,9 +175,6 @@ func setupClient(t *testing.T, c *clientConfig) (*grpc.ClientConn, *manual.Resol if c.balancerName != "" { opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, c.balancerName))) } - if c.testHealthCheckFuncWrapper != nil { - opts = append(opts, internal.WithHealthCheckFunc.(func(internal.HealthChecker) grpc.DialOption)(c.testHealthCheckFuncWrapper)) - } opts = append(opts, c.extraDialOption...) 
} @@ -281,8 +283,8 @@ func (s) TestHealthCheckWithGoAway(t *testing.T) { s, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + hcEnterChan, hcExitChan := setupHealthCheckWrapper(t) + cc, r := setupClient(t, &clientConfig{}) tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, @@ -359,8 +361,8 @@ func (s) TestHealthCheckWithConnClose(t *testing.T) { s, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + hcEnterChan, hcExitChan := setupHealthCheckWrapper(t) + cc, r := setupClient(t, &clientConfig{}) tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, @@ -409,8 +411,8 @@ func (s) TestHealthCheckWithAddrConnDrain(t *testing.T) { _, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + hcEnterChan, hcExitChan := setupHealthCheckWrapper(t) + cc, r := setupClient(t, &clientConfig{}) tc := testgrpc.NewTestServiceClient(cc) sc := parseServiceConfig(t, r, `{ "healthCheckConfig": { @@ -489,8 +491,8 @@ func (s) TestHealthCheckWithClientConnClose(t *testing.T) { _, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + hcEnterChan, hcExitChan := setupHealthCheckWrapper(t) + cc, r := setupClient(t, &clientConfig{}) tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, @@ -555,8 +557,8 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalledAddrConnShutDown(t *tes _, lis, ts := setupServer(t, watchFunc) ts.SetServingStatus("delay", healthpb.HealthCheckResponse_SERVING) - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - _, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + hcEnterChan, hcExitChan := setupHealthCheckWrapper(t) + _, r := setupClient(t, &clientConfig{}) // The serviceName "delay" is specially handled at server side, where response will not be sent // back to client immediately upon receiving the request (client should receive no response until @@ -618,8 +620,8 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalled(t *testing.T) { s, lis, ts := setupServer(t, watchFunc) ts.SetServingStatus("delay", healthpb.HealthCheckResponse_SERVING) - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - _, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + hcEnterChan, hcExitChan := setupHealthCheckWrapper(t) + _, r := setupClient(t, &clientConfig{}) // The serviceName "delay" is specially handled at server side, where response will not be 
sent
 	// back to client immediately upon receiving the request (client should receive no response until
@@ -659,11 +661,8 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalled(t *testing.T) {
 }
 
 func testHealthCheckDisableWithDialOption(t *testing.T, addr string) {
-	hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper()
-	cc, r := setupClient(t, &clientConfig{
-		testHealthCheckFuncWrapper: testHealthCheckFuncWrapper,
-		extraDialOption:            []grpc.DialOption{grpc.WithDisableHealthCheck()},
-	})
+	hcEnterChan, _ := setupHealthCheckWrapper(t)
+	cc, r := setupClient(t, &clientConfig{extraDialOption: []grpc.DialOption{grpc.WithDisableHealthCheck()}})
 	tc := testgrpc.NewTestServiceClient(cc)
 	r.UpdateState(resolver.State{
 		Addresses: []resolver.Address{{Addr: addr}},
@@ -694,10 +693,8 @@ func testHealthCheckDisableWithDialOption(t *testing.T, addr string) {
 }
 
 func testHealthCheckDisableWithBalancer(t *testing.T, addr string) {
-	hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper()
-	cc, r := setupClient(t, &clientConfig{
-		testHealthCheckFuncWrapper: testHealthCheckFuncWrapper,
-	})
+	hcEnterChan, _ := setupHealthCheckWrapper(t)
+	cc, r := setupClient(t, &clientConfig{})
 	tc := testgrpc.NewTestServiceClient(cc)
 	r.UpdateState(resolver.State{
 		Addresses: []resolver.Address{{Addr: addr}},
@@ -728,8 +725,8 @@ func testHealthCheckDisableWithBalancer(t *testing.T, addr string) {
 }
 
 func testHealthCheckDisableWithServiceConfig(t *testing.T, addr string) {
-	hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper()
-	cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper})
+	hcEnterChan, _ := setupHealthCheckWrapper(t)
+	cc, r := setupClient(t, &clientConfig{})
 	tc := testgrpc.NewTestServiceClient(cc)
 	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: addr}}})
 

From a3a8657078983be4c5e9e9e856c757a5de9b3a45 Mon Sep 17 00:00:00 2001
From: Abhishek Ranjan <159750762+aranjans@users.noreply.github.com>
Date: Fri, 8 Nov 2024 11:30:52 +0530
Subject: [PATCH 51/57] clusterimpl: update picker synchronously on config update (#7652)

---
 .../balancer/clusterimpl/balancer_test.go   |  68 ++++
 .../balancer/clusterimpl/clusterimpl.go     | 296 ++++++++----------
 xds/internal/balancer/clusterimpl/picker.go |  11 -
 3 files changed, 204 insertions(+), 171 deletions(-)

diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go
index c2e1253c8f4d..f1e99fab9bf9 100644
--- a/xds/internal/balancer/clusterimpl/balancer_test.go
+++ b/xds/internal/balancer/clusterimpl/balancer_test.go
@@ -943,6 +943,74 @@ func (s) TestFailedToParseChildPolicyConfig(t *testing.T) {
 	}
 }
 
+// Tests that the picker is updated synchronously on receipt of a
+// configuration update.
+func (s) TestPickerUpdatedSynchronouslyOnConfigUpdate(t *testing.T) {
+	// Override the pickerUpdateHook to be notified that picker was updated.
+	pickerUpdated := make(chan struct{}, 1)
+	origNewPickerUpdated := pickerUpdateHook
+	pickerUpdateHook = func() {
+		pickerUpdated <- struct{}{}
+	}
+	defer func() { pickerUpdateHook = origNewPickerUpdated }()
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	// Override the clientConnUpdateHook to ensure client conn was updated.
+ clientConnUpdateDone := make(chan struct{}, 1) + origClientConnUpdateHook := clientConnUpdateHook + clientConnUpdateHook = func() { + // Verify that picker was updated before the completion of + // client conn update. + select { + case <-pickerUpdated: + case <-ctx.Done(): + t.Fatal("Client conn update completed before picker update.") + } + clientConnUpdateDone <- struct{}{} + } + defer func() { clientConnUpdateHook = origClientConnUpdateHook }() + + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) + xdsC := fakeclient.NewClient() + + builder := balancer.Get(Name) + cc := testutils.NewBalancerClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + defer b.Close() + + // Create a stub balancer which waits for the cluster_impl policy to be + // closed before sending a picker update (upon receipt of a resolver + // update). + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + bd.ClientConn.UpdateState(balancer.State{ + Picker: base.NewErrPicker(errors.New("dummy error picker")), + }) + return nil + }, + }) + + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: t.Name(), + }, + }, + }); err != nil { + t.Fatalf("Unexpected error from UpdateClientConnState: %v", err) + } + + select { + case <-clientConnUpdateDone: + case <-ctx.Done(): + t.Fatal("Timed out waiting for client conn update to be completed.") + } +} + func assertString(f func() (string, error)) string { s, err := f() if err != nil { diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index bbd754c3f4c0..7f4218d1b3cf 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -24,7 +24,6 @@ package clusterimpl import ( - "context" "encoding/json" "fmt" "sync" @@ -35,7 +34,6 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/bootstrap" @@ -54,8 +52,11 @@ const ( ) var ( - connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address) - errBalancerClosed = fmt.Errorf("%s LB policy is closed", Name) + connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address) + // Below function is no-op in actual code, but can be overridden in + // tests to give tests visibility into exactly when certain events happen. 
+ clientConnUpdateHook = func() {} + pickerUpdateHook = func() {} ) func init() { @@ -65,14 +66,10 @@ func init() { type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { - ctx, cancel := context.WithCancel(context.Background()) b := &clusterImplBalancer{ - ClientConn: cc, - bOpts: bOpts, - loadWrapper: loadstore.NewWrapper(), - requestCountMax: defaultRequestCountMax, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, + ClientConn: cc, + loadWrapper: loadstore.NewWrapper(), + requestCountMax: defaultRequestCountMax, } b.logger = prefixLogger(b) b.child = gracefulswitch.NewBalancer(b, bOpts) @@ -91,33 +88,75 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err type clusterImplBalancer struct { balancer.ClientConn - bOpts balancer.BuildOptions - logger *grpclog.PrefixLogger - xdsClient xdsclient.XDSClient - - config *LBConfig + // The following fields are set at creation time, and are read-only after + // that, and therefore need not be protected by a mutex. + logger *grpclog.PrefixLogger + loadWrapper *loadstore.Wrapper + + // The following fields are only accessed from balancer API methods, which + // are guaranteed to be called serially by gRPC. + xdsClient xdsclient.XDSClient // Sent down in ResolverState attributes. + cancelLoadReport func() // To stop reporting load through the above xDS client. + edsServiceName string // EDS service name to report load for. + lrsServer *bootstrap.ServerConfig // Load reporting server configuration. + dropCategories []DropConfig // The categories for drops. child *gracefulswitch.Balancer - cancelLoadReport func() - edsServiceName string - lrsServer *bootstrap.ServerConfig - loadWrapper *loadstore.Wrapper - - clusterNameMu sync.Mutex - clusterName string - - serializer *grpcsync.CallbackSerializer - serializerCancel context.CancelFunc - - // childState/drops/requestCounter keeps the state used by the most recently - // generated picker. - childState balancer.State - dropCategories []DropConfig // The categories for drops. - drops []*dropper - requestCounterCluster string // The cluster name for the request counter. - requestCounterService string // The service name for the request counter. - requestCounter *xdsclient.ClusterRequestsCounter - requestCountMax uint32 - telemetryLabels map[string]string + + // The following fields are protected by mu, since they are accessed in + // balancer API methods and in methods called from the child policy. + mu sync.Mutex + clusterName string // The cluster name for credentials handshaking. + inhibitPickerUpdates bool // Inhibits state updates from child policy when processing an update from the parent. + childState balancer.State // Most recent state update from the child policy. + drops []*dropper // Drops implementation. + requestCounterCluster string // The cluster name for the request counter, from LB config. + requestCounterService string // The service name for the request counter, from LB config. + requestCountMax uint32 // Max concurrent requests, from LB config. + requestCounter *xdsclient.ClusterRequestsCounter // Tracks total inflight requests for a given service. + telemetryLabels map[string]string // Telemetry labels to set on picks, from LB config. +} + +// handleDropAndRequestCountLocked compares drop and request counter in newConfig with +// the one currently used by picker, and is protected by b.mu. It returns a boolean +// indicating if a new picker needs to be generated. 
+func (b *clusterImplBalancer) handleDropAndRequestCountLocked(newConfig *LBConfig) bool { + var updatePicker bool + if !equalDropCategories(b.dropCategories, newConfig.DropCategories) { + b.dropCategories = newConfig.DropCategories + b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) + for _, c := range newConfig.DropCategories { + b.drops = append(b.drops, newDropper(c)) + } + updatePicker = true + } + + if b.requestCounterCluster != newConfig.Cluster || b.requestCounterService != newConfig.EDSServiceName { + b.requestCounterCluster = newConfig.Cluster + b.requestCounterService = newConfig.EDSServiceName + b.requestCounter = xdsclient.GetClusterRequestsCounter(newConfig.Cluster, newConfig.EDSServiceName) + updatePicker = true + } + var newRequestCountMax uint32 = 1024 + if newConfig.MaxConcurrentRequests != nil { + newRequestCountMax = *newConfig.MaxConcurrentRequests + } + if b.requestCountMax != newRequestCountMax { + b.requestCountMax = newRequestCountMax + updatePicker = true + } + + return updatePicker +} + +func (b *clusterImplBalancer) newPickerLocked() *picker { + return &picker{ + drops: b.drops, + s: b.childState, + loadStore: b.loadWrapper, + counter: b.requestCounter, + countMax: b.requestCountMax, + telemetryLabels: b.telemetryLabels, + } } // updateLoadStore checks the config for load store, and decides whether it @@ -198,7 +237,12 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { return nil } -func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) error { +func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + defer clientConnUpdateHook() + + b.mu.Lock() + b.inhibitPickerUpdates = true + b.mu.Unlock() if b.logger.V(2) { b.logger.Infof("Received configuration: %s", pretty.ToJSON(s.BalancerConfig)) } @@ -241,43 +285,28 @@ func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) return err } - b.config = newConfig - - b.telemetryLabels = newConfig.TelemetryLabels - dc := b.handleDropAndRequestCount(newConfig) - if dc != nil && b.childState.Picker != nil { - b.ClientConn.UpdateState(balancer.State{ - ConnectivityState: b.childState.ConnectivityState, - Picker: b.newPicker(dc), - }) - } - // Addresses and sub-balancer config are sent to sub-balancer. - return b.child.UpdateClientConnState(balancer.ClientConnState{ + err = b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: s.ResolverState, BalancerConfig: parsedCfg, }) -} -func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - // Handle the update in a blocking fashion. - errCh := make(chan error, 1) - callback := func(context.Context) { - errCh <- b.updateClientConnState(s) - } - onFailure := func() { - // An attempt to schedule callback fails only when an update is received - // after Close(). 
- errCh <- errBalancerClosed + b.mu.Lock() + b.telemetryLabels = newConfig.TelemetryLabels + if b.handleDropAndRequestCountLocked(newConfig) && b.childState.Picker != nil { + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: b.newPickerLocked(), + }) } - b.serializer.ScheduleOr(callback, onFailure) - return <-errCh + b.inhibitPickerUpdates = false + b.mu.Unlock() + pickerUpdateHook() + return err } func (b *clusterImplBalancer) ResolverError(err error) { - b.serializer.TrySchedule(func(context.Context) { - b.child.ResolverError(err) - }) + b.child.ResolverError(err) } func (b *clusterImplBalancer) updateSubConnState(_ balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) { @@ -302,51 +331,50 @@ func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer } func (b *clusterImplBalancer) Close() { - b.serializer.TrySchedule(func(_ context.Context) { - b.child.Close() - b.childState = balancer.State{} + b.child.Close() + b.childState = balancer.State{} - if b.cancelLoadReport != nil { - b.cancelLoadReport() - b.cancelLoadReport = nil - } - b.logger.Infof("Shutdown") - }) - b.serializerCancel() - <-b.serializer.Done() + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + } + b.logger.Infof("Shutdown") } func (b *clusterImplBalancer) ExitIdle() { - b.serializer.TrySchedule(func(context.Context) { - b.child.ExitIdle() - }) + b.child.ExitIdle() } // Override methods to accept updates from the child LB. func (b *clusterImplBalancer) UpdateState(state balancer.State) { - b.serializer.TrySchedule(func(context.Context) { - b.childState = state - b.ClientConn.UpdateState(balancer.State{ - ConnectivityState: b.childState.ConnectivityState, - Picker: b.newPicker(&dropConfigs{ - drops: b.drops, - requestCounter: b.requestCounter, - requestCountMax: b.requestCountMax, - }), - }) + b.mu.Lock() + defer b.mu.Unlock() + + // Inhibit sending a picker update to our parent as part of handling new + // state from the child, if we are currently handling an update from our + // parent. Update the childState field regardless. + b.childState = state + if b.inhibitPickerUpdates { + return + } + + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: b.newPickerLocked(), }) + pickerUpdateHook() } func (b *clusterImplBalancer) setClusterName(n string) { - b.clusterNameMu.Lock() - defer b.clusterNameMu.Unlock() + b.mu.Lock() + defer b.mu.Unlock() b.clusterName = n } func (b *clusterImplBalancer) getClusterName() string { - b.clusterNameMu.Lock() - defer b.clusterNameMu.Unlock() + b.mu.Lock() + defer b.mu.Unlock() return b.clusterName } @@ -387,23 +415,21 @@ func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer scw := &scWrapper{} oldListener := opts.StateListener opts.StateListener = func(state balancer.SubConnState) { - b.serializer.TrySchedule(func(context.Context) { - b.updateSubConnState(sc, state, oldListener) - if state.ConnectivityState != connectivity.Ready { - return - } - // Read connected address and call updateLocalityID() based on the connected - // address's locality. 
https://github.com/grpc/grpc-go/issues/7339 - addr := connectedAddress(state) - lID := xdsinternal.GetLocalityID(addr) - if lID.Empty() { - if b.logger.V(2) { - b.logger.Infof("Locality ID for %s unexpectedly empty", addr) - } - return + b.updateSubConnState(sc, state, oldListener) + if state.ConnectivityState != connectivity.Ready { + return + } + // Read connected address and call updateLocalityID() based on the connected + // address's locality. https://github.com/grpc/grpc-go/issues/7339 + addr := connectedAddress(state) + lID := xdsinternal.GetLocalityID(addr) + if lID.Empty() { + if b.logger.V(2) { + b.logger.Infof("Locality ID for %s unexpectedly empty", addr) } - scw.updateLocalityID(lID) - }) + return + } + scw.updateLocalityID(lID) } sc, err := b.ClientConn.NewSubConn(newAddrs, opts) if err != nil { @@ -433,53 +459,3 @@ func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resol } b.ClientConn.UpdateAddresses(sc, newAddrs) } - -type dropConfigs struct { - drops []*dropper - requestCounter *xdsclient.ClusterRequestsCounter - requestCountMax uint32 -} - -// handleDropAndRequestCount compares drop and request counter in newConfig with -// the one currently used by picker. It returns a new dropConfigs if a new -// picker needs to be generated, otherwise it returns nil. -func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dropConfigs { - // Compare new drop config. And update picker if it's changed. - var updatePicker bool - if !equalDropCategories(b.dropCategories, newConfig.DropCategories) { - b.dropCategories = newConfig.DropCategories - b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) - for _, c := range newConfig.DropCategories { - b.drops = append(b.drops, newDropper(c)) - } - updatePicker = true - } - - // Compare cluster name. And update picker if it's changed, because circuit - // breaking's stream counter will be different. - if b.requestCounterCluster != newConfig.Cluster || b.requestCounterService != newConfig.EDSServiceName { - b.requestCounterCluster = newConfig.Cluster - b.requestCounterService = newConfig.EDSServiceName - b.requestCounter = xdsclient.GetClusterRequestsCounter(newConfig.Cluster, newConfig.EDSServiceName) - updatePicker = true - } - // Compare upper bound of stream count. And update picker if it's changed. - // This is also for circuit breaking. 
- var newRequestCountMax uint32 = 1024 - if newConfig.MaxConcurrentRequests != nil { - newRequestCountMax = *newConfig.MaxConcurrentRequests - } - if b.requestCountMax != newRequestCountMax { - b.requestCountMax = newRequestCountMax - updatePicker = true - } - - if !updatePicker { - return nil - } - return &dropConfigs{ - drops: b.drops, - requestCounter: b.requestCounter, - requestCountMax: b.requestCountMax, - } -} diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index fbadbb92ba39..dd4d39b3d398 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -87,17 +87,6 @@ type picker struct { telemetryLabels map[string]string } -func (b *clusterImplBalancer) newPicker(config *dropConfigs) *picker { - return &picker{ - drops: config.drops, - s: b.childState, - loadStore: b.loadWrapper, - counter: config.requestCounter, - countMax: config.requestCountMax, - telemetryLabels: b.telemetryLabels, - } -} - func telemetryLabels(ctx context.Context) map[string]string { if ctx == nil { return nil From 0d0e5308485f563e58bc559dd07a5bb652b544b2 Mon Sep 17 00:00:00 2001 From: Muhammed Jishin Jamal TCP <83187455+mohdjishin@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:35:04 +0530 Subject: [PATCH 52/57] grpc: export MethodHandler #7794 (#7796) --- server.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server.go b/server.go index 23a1660057e8..05f0c6e2ad79 100644 --- a/server.go +++ b/server.go @@ -87,12 +87,13 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +// MethodHandler is a function type that processes a unary RPC method call. +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string - Handler methodHandler + Handler MethodHandler } // ServiceDesc represents an RPC service's specification. From c2a2d20f7fb7e3c0465ced6e08924c3304dde835 Mon Sep 17 00:00:00 2001 From: hanut19 <50198451+hanut19@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:47:16 +0530 Subject: [PATCH 53/57] docs: update documentation for `ClientStream.SendMsg()` returning `nil` unconditionally when `ClientStreams=false` (#7790) --- stream.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stream.go b/stream.go index 34c846a436b5..722209cc6670 100644 --- a/stream.go +++ b/stream.go @@ -113,7 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. + // the stream may be discovered using RecvMsg. For unary or server-streaming + // RPCs (StreamDesc.ClientStreams is false), a nil error is returned + // unconditionally. 
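// An illustrative aside (not from the patch): the MethodHandler type exported
// in the preceding server.go hunk makes it possible to spell out the handler
// signature by name when hand-writing a ServiceDesc instead of relying on
// generated code. Everything below -- the "test.Echo" service, echoHandler, and
// the use of StringValue as the request/response message -- is hypothetical and
// only a sketch of how such wiring could look.
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// echoHandler matches the now-exported grpc.MethodHandler signature.
func echoHandler(srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor) (any, error) {
	in := new(wrapperspb.StringValue)
	if err := dec(in); err != nil {
		return nil, err
	}
	echo := func(_ context.Context, req any) (any, error) {
		return wrapperspb.String(req.(*wrapperspb.StringValue).GetValue()), nil
	}
	if interceptor == nil {
		return echo(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: "/test.Echo/Echo"}
	return interceptor(ctx, in, info, echo)
}

var echoServiceDesc = grpc.ServiceDesc{
	ServiceName: "test.Echo",
	HandlerType: (*any)(nil),
	Methods: []grpc.MethodDesc{{
		MethodName: "Echo",
		Handler:    echoHandler, // the field's type can now be named: grpc.MethodHandler
	}},
}

func main() {
	s := grpc.NewServer()
	s.RegisterService(&echoServiceDesc, struct{}{})
	fmt.Println("registered hand-written service:", echoServiceDesc.ServiceName)
}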
// // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or From d2c1aae4c8990253793c2c7d368c4d711ad25f92 Mon Sep 17 00:00:00 2001 From: Zach Reyes <39203661+zasweq@users.noreply.github.com> Date: Mon, 11 Nov 2024 14:55:54 -0500 Subject: [PATCH 54/57] xds: Plumb EDS endpoints through xDS Balancer Tree (#7816) --- balancer/weightedtarget/weightedtarget.go | 2 + internal/hierarchy/hierarchy.go | 59 +++++++++++++++++++ .../balancer/clustermanager/clustermanager.go | 2 + .../clusterresolver/clusterresolver.go | 1 - xds/internal/balancer/priority/balancer.go | 3 + 5 files changed, 66 insertions(+), 1 deletion(-) diff --git a/balancer/weightedtarget/weightedtarget.go b/balancer/weightedtarget/weightedtarget.go index c004e112c40a..a617f6a63a22 100644 --- a/balancer/weightedtarget/weightedtarget.go +++ b/balancer/weightedtarget/weightedtarget.go @@ -108,6 +108,7 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } addressesSplit := hierarchy.Group(s.ResolverState.Addresses) + endpointsSplit := hierarchy.GroupEndpoints(s.ResolverState.Endpoints) b.stateAggregator.PauseStateUpdates() defer b.stateAggregator.ResumeStateUpdates() @@ -155,6 +156,7 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: addressesSplit[name], + Endpoints: endpointsSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes.WithValue(localityKey, name), }, diff --git a/internal/hierarchy/hierarchy.go b/internal/hierarchy/hierarchy.go index c3baac3643ce..362c05fa2aa6 100644 --- a/internal/hierarchy/hierarchy.go +++ b/internal/hierarchy/hierarchy.go @@ -48,6 +48,18 @@ func (p pathValue) Equal(o any) bool { return true } +// FromEndpoint returns the hierarchical path of endpoint. +func FromEndpoint(endpoint resolver.Endpoint) []string { + path, _ := endpoint.Attributes.Value(pathKey).(pathValue) + return path +} + +// SetInEndpoint overrides the hierarchical path in endpoint with path. +func SetInEndpoint(endpoint resolver.Endpoint, path []string) resolver.Endpoint { + endpoint.Attributes = endpoint.Attributes.WithValue(pathKey, pathValue(path)) + return endpoint +} + // Get returns the hierarchical path of addr. func Get(addr resolver.Address) []string { attrs := addr.BalancerAttributes @@ -110,3 +122,50 @@ func Group(addrs []resolver.Address) map[string][]resolver.Address { } return ret } + +// GroupEndpoints splits a slice of endpoints into groups based on +// the first hierarchy path. The first hierarchy path will be removed from the +// result. +// +// Input: +// [ +// +// {endpoint0, path: [p0, wt0]} +// {endpoint1, path: [p0, wt1]} +// {endpoint2, path: [p1, wt2]} +// {endpoint3, path: [p1, wt3]} +// +// ] +// +// Endpoints will be split into p0/p1, and the p0/p1 will be removed from the +// path. +// +// Output: +// +// { +// p0: [ +// {endpoint0, path: [wt0]}, +// {endpoint1, path: [wt1]}, +// ], +// p1: [ +// {endpoint2, path: [wt2]}, +// {endpoint3, path: [wt3]}, +// ], +// } +// +// If hierarchical path is not set, or has no path in it, the endpoint is +// dropped. 
+func GroupEndpoints(endpoints []resolver.Endpoint) map[string][]resolver.Endpoint { + ret := make(map[string][]resolver.Endpoint) + for _, endpoint := range endpoints { + oldPath := FromEndpoint(endpoint) + if len(oldPath) == 0 { + continue + } + curPath := oldPath[0] + newPath := oldPath[1:] + newEndpoint := SetInEndpoint(endpoint, newPath) + ret[curPath] = append(ret[curPath], newEndpoint) + } + return ret +} diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index ef5b34ea4451..24ad2399ddd4 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -87,6 +87,7 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er // TODO: Get rid of handling hierarchy in addresses. This LB policy never // gets addresses from the resolver. addressesSplit := hierarchy.Group(s.ResolverState.Addresses) + endpointsSplit := hierarchy.GroupEndpoints(s.ResolverState.Endpoints) // Remove sub-balancers that are not in the new list from the aggregator and // balancergroup. @@ -139,6 +140,7 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) er if err := b.bg.UpdateClientConnState(childName, balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: addressesSplit[childName], + Endpoints: endpointsSplit[childName], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }, diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go index 3b996989689e..ae2c5fe957a2 100644 --- a/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -252,7 +252,6 @@ func (b *clusterResolverBalancer) updateChildConfig() { for i, a := range addrs { endpoints[i].Attributes = a.BalancerAttributes endpoints[i].Addresses = []resolver.Address{a} - endpoints[i].Addresses[0].BalancerAttributes = nil } if err := b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index c17c62f23a59..ba3fe52e5c0f 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -123,6 +123,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } addressesSplit := hierarchy.Group(s.ResolverState.Addresses) + endpointsSplit := hierarchy.GroupEndpoints(s.ResolverState.Endpoints) b.mu.Lock() // Create and remove children, since we know all children from the config @@ -142,6 +143,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err cb := newChildBalancer(name, b, bb.Name(), b.cc) cb.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], + Endpoints: endpointsSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }) @@ -163,6 +165,7 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // be built, if it's a low priority). 
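// An illustrative aside (not from the patch): a tiny demonstration of the
// endpoint grouping added in internal/hierarchy above, which weightedtarget,
// clustermanager and priority all use to split one endpoint list among their
// children. Because hierarchy is an internal package this only builds from
// inside the grpc-go module, and the addresses and path names are made up.
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/hierarchy"
	"google.golang.org/grpc/resolver"
)

func endpoint(addr string, path ...string) resolver.Endpoint {
	return hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: addr}}}, path)
}

func main() {
	endpoints := []resolver.Endpoint{
		endpoint("10.0.0.1:80", "p0", "wt0"),
		endpoint("10.0.0.2:80", "p0", "wt1"),
		endpoint("10.0.0.3:80", "p1", "wt2"),
	}
	// The first path element selects the child ("p0" or "p1") and is stripped,
	// leaving the rest of the hierarchy for the next balancer in the tree.
	for child, eps := range hierarchy.GroupEndpoints(endpoints) {
		for _, ep := range eps {
			fmt.Println(child, ep.Addresses[0].Addr, hierarchy.FromEndpoint(ep))
		}
	}
}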
currentChild.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], + Endpoints: endpointsSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }) From 60c70a436124c25a5aa3a3b55464d01c22e7ea64 Mon Sep 17 00:00:00 2001 From: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:02:57 +1100 Subject: [PATCH 55/57] mem: implement `ReadAll()` for more efficient `io.Reader` consumption (#7653) --- mem/buffer_slice.go | 59 +++++++- mem/buffer_slice_test.go | 308 +++++++++++++++++++++++++++++++++++++++ rpc_util.go | 3 +- 3 files changed, 366 insertions(+), 4 deletions(-) diff --git a/mem/buffer_slice.go b/mem/buffer_slice.go index 228e9c2f20f2..65002e2cc851 100644 --- a/mem/buffer_slice.go +++ b/mem/buffer_slice.go @@ -22,6 +22,11 @@ import ( "io" ) +const ( + // 32 KiB is what io.Copy uses. + readAllBufSize = 32 * 1024 +) + // BufferSlice offers a means to represent data that spans one or more Buffer // instances. A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -219,8 +224,58 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. +// buffer into a new Buffer pulled from the given pool and the Buffer is +// added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { return &writer{buffers: buffers, pool: pool} } + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +// +// Important: A failed call returns a non-nil error and may also return +// partially read buffers. It is the responsibility of the caller to free the +// BufferSlice returned, or its memory will not be reused. +func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { + var result BufferSlice + if wt, ok := r.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } +nextBuffer: + for { + buf := pool.Get(readAllBufSize) + // We asked for 32KiB but may have been given a bigger buffer. + // Use all of it if that's the case. 
+ *buf = (*buf)[:cap(*buf)] + usedCap := 0 + for { + n, err := r.Read((*buf)[usedCap:]) + usedCap += n + if err != nil { + if usedCap == 0 { + // Nothing in this buf, put it back + pool.Put(buf) + } else { + *buf = (*buf)[:usedCap] + result = append(result, NewBuffer(buf, pool)) + } + if err == io.EOF { + err = nil + } + return result, err + } + if len(*buf) == usedCap { + result = append(result, NewBuffer(buf, pool)) + continue nextBuffer + } + } + } +} diff --git a/mem/buffer_slice_test.go b/mem/buffer_slice_test.go index bb4384434ee2..bb9303f0e9e1 100644 --- a/mem/buffer_slice_test.go +++ b/mem/buffer_slice_test.go @@ -20,6 +20,8 @@ package mem_test import ( "bytes" + "crypto/rand" + "errors" "fmt" "io" "testing" @@ -27,6 +29,12 @@ import ( "google.golang.org/grpc/mem" ) +const ( + minReadSize = 1 + // Should match the constant in buffer_slice.go (another package) + readAllBufSize = 32 * 1024 // 32 KiB +) + func newBuffer(data []byte, pool mem.BufferPool) mem.Buffer { return mem.NewBuffer(&data, pool) } @@ -156,6 +164,252 @@ func (s) TestBufferSlice_Reader(t *testing.T) { } } +// TestBufferSlice_ReadAll_Reads exercises ReadAll by allowing it to read +// various combinations of data, empty data, EOF. +func (s) TestBufferSlice_ReadAll_Reads(t *testing.T) { + testcases := []struct { + name string + reads []readStep + wantErr string + wantBufs int + }{ + { + name: "EOF", + reads: []readStep{ + { + err: io.EOF, + }, + }, + }, + { + name: "data,EOF", + reads: []readStep{ + { + n: minReadSize, + }, + { + err: io.EOF, + }, + }, + wantBufs: 1, + }, + { + name: "data+EOF", + reads: []readStep{ + { + n: minReadSize, + err: io.EOF, + }, + }, + wantBufs: 1, + }, + { + name: "0,data+EOF", + reads: []readStep{ + {}, + { + n: minReadSize, + err: io.EOF, + }, + }, + wantBufs: 1, + }, + { + name: "0,data,EOF", + reads: []readStep{ + {}, + { + n: minReadSize, + }, + { + err: io.EOF, + }, + }, + wantBufs: 1, + }, + { + name: "data,data+EOF", + reads: []readStep{ + { + n: minReadSize, + }, + { + n: minReadSize, + err: io.EOF, + }, + }, + wantBufs: 1, + }, + { + name: "error", + reads: []readStep{ + { + err: errors.New("boom"), + }, + }, + wantErr: "boom", + }, + { + name: "data+error", + reads: []readStep{ + { + n: minReadSize, + err: errors.New("boom"), + }, + }, + wantErr: "boom", + wantBufs: 1, + }, + { + name: "data,data+error", + reads: []readStep{ + { + n: minReadSize, + }, + { + n: minReadSize, + err: errors.New("boom"), + }, + }, + wantErr: "boom", + wantBufs: 1, + }, + { + name: "data,data+EOF - whole buf", + reads: []readStep{ + { + n: minReadSize, + }, + { + n: readAllBufSize - minReadSize, + err: io.EOF, + }, + }, + wantBufs: 1, + }, + { + name: "data,data,EOF - whole buf", + reads: []readStep{ + { + n: minReadSize, + }, + { + n: readAllBufSize - minReadSize, + }, + { + err: io.EOF, + }, + }, + wantBufs: 1, + }, + { + name: "data,data,EOF - 2 bufs", + reads: []readStep{ + { + n: readAllBufSize, + }, + { + n: minReadSize, + }, + { + n: readAllBufSize - minReadSize, + }, + { + n: minReadSize, + }, + { + err: io.EOF, + }, + }, + wantBufs: 3, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + pool := &testPool{ + allocated: make(map[*[]byte]struct{}), + } + r := &stepReader{ + reads: tc.reads, + } + data, err := mem.ReadAll(r, pool) + if tc.wantErr != "" { + if err == nil || err.Error() != tc.wantErr { + t.Fatalf("ReadAll() returned err %v, wanted %q", err, tc.wantErr) + } + } else { + if err != nil { + t.Fatal(err) + } + } + gotData := data.Materialize() + if 
!bytes.Equal(r.read, gotData) { + t.Fatalf("ReadAll() returned data %q, wanted %q", gotData, r.read) + } + if len(data) != tc.wantBufs { + t.Fatalf("ReadAll() returned %d bufs, wanted %d bufs", len(data), tc.wantBufs) + } + // all but last should be full buffers + for i := 0; i < len(data)-1; i++ { + if data[i].Len() != readAllBufSize { + t.Fatalf("ReadAll() returned data length %d, wanted %d", data[i].Len(), readAllBufSize) + } + } + data.Free() + if len(pool.allocated) > 0 { + t.Fatalf("got %d allocated buffers, wanted none", len(pool.allocated)) + } + }) + } +} + +func (s) TestBufferSlice_ReadAll_WriteTo(t *testing.T) { + testcases := []struct { + name string + size int + }{ + { + name: "small", + size: minReadSize, + }, + { + name: "exact size", + size: readAllBufSize, + }, + { + name: "big", + size: readAllBufSize * 3, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + pool := &testPool{ + allocated: make(map[*[]byte]struct{}), + } + buf := make([]byte, tc.size) + _, err := rand.Read(buf) + if err != nil { + t.Fatal(err) + } + r := bytes.NewBuffer(buf) + data, err := mem.ReadAll(r, pool) + if err != nil { + t.Fatal(err) + } + + gotData := data.Materialize() + if !bytes.Equal(buf, gotData) { + t.Fatalf("ReadAll() = %q, wanted %q", gotData, buf) + } + data.Free() + if len(pool.allocated) > 0 { + t.Fatalf("wanted no allocated buffers, got %d", len(pool.allocated)) + } + }) + } +} + func ExampleNewWriter() { var bs mem.BufferSlice pool := mem.DefaultBufferPool() @@ -176,3 +430,57 @@ func ExampleNewWriter() { // Wrote 4 bytes, err: // abcdabcdabcd } + +var ( + _ io.Reader = (*stepReader)(nil) + _ mem.BufferPool = (*testPool)(nil) +) + +// readStep describes what a single stepReader.Read should do - how much data +// to return and what error to return. +type readStep struct { + n int + err error +} + +// stepReader implements io.Reader that reads specified amount of data and/or +// returns the specified error in specified steps. +// The read data is accumulated in the read field. +type stepReader struct { + reads []readStep + read []byte +} + +func (s *stepReader) Read(buf []byte) (int, error) { + if len(s.reads) == 0 { + panic("unexpected Read() call") + } + read := s.reads[0] + s.reads = s.reads[1:] + _, err := rand.Read(buf[:read.n]) + if err != nil { + panic(err) + } + s.read = append(s.read, buf[:read.n]...) + return read.n, read.err +} + +// testPool is an implementation of BufferPool that allows to ensure that: +// - there are matching Put calls for all Get calls. +// - there are no unexpected Put calls. 
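// An illustrative aside (not from the patch): minimal usage of the mem.ReadAll
// helper added above. Per its documentation the caller owns the returned
// BufferSlice and must Free it -- even on error, since partially read buffers
// may be returned. The input string is arbitrary.
package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	data, err := mem.ReadAll(strings.NewReader("hello, gRPC"), pool)
	defer data.Free() // always free, even when err != nil
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes: %q\n", data.Len(), data.Materialize())
}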
+type testPool struct { + allocated map[*[]byte]struct{} +} + +func (t *testPool) Get(length int) *[]byte { + buf := make([]byte, length) + t.allocated[&buf] = struct{}{} + return &buf +} + +func (t *testPool) Put(buf *[]byte) { + if _, ok := t.allocated[buf]; !ok { + panic("unexpected put") + } + delete(t.allocated, buf) +} diff --git a/rpc_util.go b/rpc_util.go index 033ffdc1c9bf..06c1f1b2855e 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -899,8 +899,7 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes // } //} - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) if err != nil { out.Free() return nil, 0, err From e2b98f96c919c49d1f9dd731c1a03fb49a6281a7 Mon Sep 17 00:00:00 2001 From: Arjan Singh Bal <46515553+arjan-bal@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:34:17 +0530 Subject: [PATCH 56/57] pickfirst: Implement Happy Eyeballs (#7725) --- balancer/pickfirst/internal/internal.go | 17 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 181 +++++++++---- .../pickfirstleaf/pickfirstleaf_ext_test.go | 243 ++++++++++++++++-- .../pickfirstleaf/pickfirstleaf_test.go | 13 - 4 files changed, 369 insertions(+), 85 deletions(-) diff --git a/balancer/pickfirst/internal/internal.go b/balancer/pickfirst/internal/internal.go index c51978945844..7d66cb491c40 100644 --- a/balancer/pickfirst/internal/internal.go +++ b/balancer/pickfirst/internal/internal.go @@ -18,7 +18,18 @@ // Package internal contains code internal to the pickfirst package. package internal -import "math/rand" +import ( + rand "math/rand/v2" + "time" +) -// RandShuffle pseudo-randomizes the order of addresses. -var RandShuffle = rand.Shuffle +var ( + // RandShuffle pseudo-randomizes the order of addresses. + RandShuffle = rand.Shuffle + // TimeAfterFunc allows mocking the timer for testing connection delay + // related functionality. + TimeAfterFunc = func(d time.Duration, f func()) func() { + timer := time.AfterFunc(d, f) + return func() { timer.Stop() } + } +) diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 4b54866058d5..aaec87497fd4 100644 --- a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -31,6 +31,7 @@ import ( "fmt" "net" "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" @@ -59,8 +60,13 @@ var ( Name = "pick_first_leaf" ) -// TODO: change to pick-first when this becomes the default pick_first policy. -const logPrefix = "[pick-first-leaf-lb %p] " +const ( + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. 
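// An illustrative aside (not from the patch): the TimeAfterFunc hook added to
// balancer/pickfirst/internal above is what lets tests drive the happy-eyeballs
// connection delay deterministically instead of sleeping. Below is a
// stripped-down sketch of that injection pattern; scheduleNextAttempt and the
// fake timer are made-up names, not grpc-go code.
package main

import (
	"fmt"
	"time"
)

// timeAfterFunc mirrors the hook: run f after d, return a function that stops it.
var timeAfterFunc = func(d time.Duration, f func()) func() {
	t := time.AfterFunc(d, f)
	return func() { t.Stop() }
}

// scheduleNextAttempt is the "production" caller: it only ever goes through the
// hook, never through time.AfterFunc directly, so tests can substitute it.
func scheduleNextAttempt(onExpire func()) (cancel func()) {
	return timeAfterFunc(250*time.Millisecond, onExpire)
}

func main() {
	// A test replaces the hook with a timer it controls explicitly.
	trigger := make(chan struct{})
	fired := make(chan struct{})
	timeAfterFunc = func(_ time.Duration, f func()) func() {
		stop := make(chan struct{})
		go func() {
			select {
			case <-trigger:
				f()
			case <-stop:
			}
		}()
		return func() { close(stop) }
	}

	cancel := scheduleNextAttempt(func() { close(fired) })
	defer cancel()

	close(trigger) // fire the fake timer on demand
	<-fired
	fmt.Println("happy-eyeballs delay expired; next address would be tried")
}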
+ connectionDelayInterval = 250 * time.Millisecond +) type ipAddrFamily int @@ -76,11 +82,12 @@ type pickfirstBuilder struct{} func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{ - cc: cc, - addressList: addressList{}, - subConns: resolver.NewAddressMap(), - state: connectivity.Connecting, - mu: sync.Mutex{}, + cc: cc, + addressList: addressList{}, + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + mu: sync.Mutex{}, + cancelConnectionTimer: func() {}, } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -115,8 +122,9 @@ type scData struct { subConn balancer.SubConn addr resolver.Address - state connectivity.State - lastErr error + state connectivity.State + lastErr error + connectionFailedInFirstPass bool } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { @@ -148,10 +156,11 @@ type pickfirstBalancer struct { mu sync.Mutex state connectivity.State // scData for active subonns mapped by address. - subConns *resolver.AddressMap - addressList addressList - firstPass bool - numTF int + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() } // ResolverError is called by the ClientConn when the name resolver produces @@ -186,6 +195,7 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { b.mu.Lock() defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // Cleanup state pertaining to the previous resolver state. // Treat an empty address list like an error by calling b.ResolverError. @@ -239,12 +249,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Not de-duplicating would result in attempting to connect to the same // SubConn multiple times in the same pass. We don't want this. newAddrs = deDupAddresses(newAddrs) - newAddrs = interleaveAddresses(newAddrs) - // Since we have a new set of addresses, we are again at first pass. - b.firstPass = true - // If the previous ready SubConn exists in new address list, // keep this connection and don't create new SubConns. prevAddr := b.addressList.currentAddress() @@ -269,11 +275,11 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - b.requestConnectionLocked() + b.startFirstPassLocked() } else if b.state == connectivity.TransientFailure { // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until // we're READY. See A62. - b.requestConnectionLocked() + b.startFirstPassLocked() } return nil } @@ -288,6 +294,7 @@ func (b *pickfirstBalancer) Close() { b.mu.Lock() defer b.mu.Unlock() b.closeSubConnsLocked() + b.cancelConnectionTimer() b.state = connectivity.Shutdown } @@ -297,12 +304,21 @@ func (b *pickfirstBalancer) Close() { func (b *pickfirstBalancer) ExitIdle() { b.mu.Lock() defer b.mu.Unlock() - if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { - b.firstPass = true - b.requestConnectionLocked() + if b.state == connectivity.Idle { + b.startFirstPassLocked() } } +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. 
+ for _, sd := range b.subConns.Values() { + sd.(*scData).connectionFailedInFirstPass = false + } + b.requestConnectionLocked() +} + func (b *pickfirstBalancer) closeSubConnsLocked() { for _, sd := range b.subConns.Values() { sd.(*scData).subConn.Shutdown() @@ -413,6 +429,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // shutdownRemainingLocked shuts down remaining subConns. Called when a subConn // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() for _, v := range b.subConns.Values() { sd := v.(*scData) if sd.subConn != selected.subConn { @@ -456,30 +473,69 @@ func (b *pickfirstBalancer) requestConnectionLocked() { switch scd.state { case connectivity.Idle: scd.subConn.Connect() + b.scheduleNextConnectionLocked() + return case connectivity.TransientFailure: - // Try the next address. + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + scd.connectionFailedInFirstPass = true lastErr = scd.lastErr continue - case connectivity.Ready: - // Should never happen. - b.logger.Errorf("Requesting a connection even though we have a READY SubConn") - case connectivity.Shutdown: - // Should never happen. - b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") case connectivity.Connecting: - // Wait for the SubConn to report success or failure. + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.state) + return + } - return } + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass. - b.endFirstPassLocked(lastErr) + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. + if cancelled { + return + } + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) } func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { b.mu.Lock() defer b.mu.Unlock() oldState := sd.state + // Record a connection attempt when exiting CONNECTING. + if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + } sd.state = newState.ConnectivityState // Previously relevant SubConns can still callback with state updates. 
// To prevent pickers from returning these obsolete SubConns, this logic @@ -545,17 +601,20 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub sd.lastErr = newState.ConnectionError // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. We ignore such updates. - - if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - return - } - if b.addressList.increment() { - b.requestConnectionLocked() - return + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } } - // End of the first pass. - b.endFirstPassLocked(newState.ConnectionError) + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. + b.endFirstPassIfPossibleLocked(newState.ConnectionError) } return } @@ -580,9 +639,22 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub } } -func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if !sd.connectionFailedInFirstPass { + return + } + } b.firstPass = false - b.numTF = 0 b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -654,15 +726,6 @@ func (al *addressList) currentAddress() resolver.Address { return al.addresses[al.idx] } -// first returns the first address in the list. If the list is empty, it returns -// an empty address instead. -func (al *addressList) first() resolver.Address { - if len(al.addresses) == 0 { - return resolver.Address{} - } - return al.addresses[0] -} - func (al *addressList) reset() { al.idx = 0 } @@ -685,6 +748,16 @@ func (al *addressList) seekTo(needle resolver.Address) bool { return false } +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + // equalAddressIgnoringBalAttributes returns true is a and b are considered // equal. This is different from the Equal method on the resolver.Address type // which considers all fields to determine equality. 
Here, we only consider diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go index 46e47be43ffa..bf957f98b119 100644 --- a/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_ext_test.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" + pfinternal "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" @@ -66,8 +67,7 @@ func Test(t *testing.T) { } // setupPickFirstLeaf performs steps required for pick_first tests. It starts a -// bunch of backends exporting the TestService, creates a ClientConn to them -// with service config specifying the use of the state_storing LB policy. +// bunch of backends exporting the TestService, and creates a ClientConn to them. func setupPickFirstLeaf(t *testing.T, backendCount int, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, *backendManager) { t.Helper() r := manual.NewBuilderWithScheme("whatever") @@ -86,7 +86,6 @@ func setupPickFirstLeaf(t *testing.T, backendCount int, opts ...grpc.DialOption) dopts := []grpc.DialOption{ grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r), - grpc.WithDefaultServiceConfig(stateStoringServiceConfig), } dopts = append(dopts, opts...) cc, err := grpc.NewClient(r.Scheme()+":///test.server", dopts...) @@ -121,7 +120,7 @@ func (s) TestPickFirstLeaf_SimpleResolverUpdate_FirstServerReady(t *testing.T) { balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 2) + cc, r, bm := setupPickFirstLeaf(t, 2, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -161,7 +160,7 @@ func (s) TestPickFirstLeaf_SimpleResolverUpdate_FirstServerUnReady(t *testing.T) balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 2) + cc, r, bm := setupPickFirstLeaf(t, 2, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -203,7 +202,7 @@ func (s) TestPickFirstLeaf_SimpleResolverUpdate_DuplicateAddrs(t *testing.T) { balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 2) + cc, r, bm := setupPickFirstLeaf(t, 2, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -259,7 +258,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_DisjointLists(t *testing.T) { balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 4) + cc, r, bm := setupPickFirstLeaf(t, 4, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := 
&ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -322,7 +321,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_ActiveBackendInUpdatedList(t *testing balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 3) + cc, r, bm := setupPickFirstLeaf(t, 3, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -386,7 +385,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_InActiveBackendInUpdatedList(t *testi balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 3) + cc, r, bm := setupPickFirstLeaf(t, 3, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -451,7 +450,7 @@ func (s) TestPickFirstLeaf_ResolverUpdates_IdenticalLists(t *testing.T) { balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 2) + cc, r, bm := setupPickFirstLeaf(t, 2, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -524,7 +523,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_FirstServerRestart(t *testing.T) balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 2) + cc, r, bm := setupPickFirstLeaf(t, 2, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -589,7 +588,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_SecondServerRestart(t *testing.T) balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 2) + cc, r, bm := setupPickFirstLeaf(t, 2, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -661,7 +660,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_SecondServerToFirst(t *testing.T) balCh := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 2) + cc, r, bm := setupPickFirstLeaf(t, 2, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -733,7 +732,7 @@ func (s) TestPickFirstLeaf_StopConnectedServer_FirstServerToSecond(t *testing.T) balCh := make(chan *stateStoringBalancer, 1) 
balancer.Register(&stateStoringBalancerBuilder{balancer: balCh}) - cc, r, bm := setupPickFirstLeaf(t, 2) + cc, r, bm := setupPickFirstLeaf(t, 2, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} internal.SubscribeToConnectivityStateChanges.(func(cc *grpc.ClientConn, s grpcsync.Subscriber) func())(cc, stateSubscriber) @@ -807,7 +806,7 @@ func (s) TestPickFirstLeaf_EmptyAddressList(t *testing.T) { defer cancel() balChan := make(chan *stateStoringBalancer, 1) balancer.Register(&stateStoringBalancerBuilder{balancer: balChan}) - cc, r, bm := setupPickFirstLeaf(t, 1) + cc, r, bm := setupPickFirstLeaf(t, 1, grpc.WithDefaultServiceConfig(stateStoringServiceConfig)) addrs := bm.resolverAddrs() stateSubscriber := &ccStateSubscriber{} @@ -850,6 +849,189 @@ func (s) TestPickFirstLeaf_EmptyAddressList(t *testing.T) { } } +// Test verifies that pickfirst correctly detects the end of the first happy +// eyeballs pass when the timer causes pickfirst to reach the end of the address +// list and failures are reported out of order. +func (s) TestPickFirstLeaf_HappyEyeballs_TF_AfterEndOfList(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + originalTimer := pfinternal.TimeAfterFunc + defer func() { + pfinternal.TimeAfterFunc = originalTimer + }() + triggerTimer, timeAfter := mockTimer() + pfinternal.TimeAfterFunc = timeAfter + + dialer := testutils.NewBlockingDialer() + opts := []grpc.DialOption{ + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, pickfirstleaf.Name)), + grpc.WithContextDialer(dialer.DialContext), + } + cc, rb, bm := setupPickFirstLeaf(t, 3, opts...) + addrs := bm.resolverAddrs() + holds := bm.holds(dialer) + rb.UpdateState(resolver.State{Addresses: addrs}) + cc.Connect() + + testutils.AwaitState(ctx, t, cc, connectivity.Connecting) + + // Verify that only the first server is contacted. + if holds[0].Wait(ctx) != true { + t.Fatalf("Timeout waiting for server %d with address %q to be contacted", 0, addrs[0]) + } + if holds[1].IsStarted() != false { + t.Fatalf("Server %d with address %q contacted unexpectedly", 1, addrs[1]) + } + if holds[2].IsStarted() != false { + t.Fatalf("Server %d with address %q contacted unexpectedly", 2, addrs[2]) + } + + // Make the happy eyeballs timer fire once and verify that the + // second server is contacted, but the third isn't. + triggerTimer() + if holds[1].Wait(ctx) != true { + t.Fatalf("Timeout waiting for server %d with address %q to be contacted", 1, addrs[1]) + } + if holds[2].IsStarted() != false { + t.Fatalf("Server %d with address %q contacted unexpectedly", 2, addrs[2]) + } + + // Make the happy eyeballs timer fire once more and verify that the + // third server is contacted. + triggerTimer() + if holds[2].Wait(ctx) != true { + t.Fatalf("Timeout waiting for server %d with address %q to be contacted", 2, addrs[2]) + } + + // First SubConn Fails. + holds[0].Fail(fmt.Errorf("test error")) + + // No TF should be reported until the first pass is complete. + shortCtx, shortCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer shortCancel() + testutils.AwaitNotState(shortCtx, t, cc, connectivity.TransientFailure) + + // Third SubConn fails. 
+ shortCtx, shortCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer shortCancel() + holds[2].Fail(fmt.Errorf("test error")) + testutils.AwaitNotState(shortCtx, t, cc, connectivity.TransientFailure) + + // Last SubConn fails, this should result in a TF update. + holds[1].Fail(fmt.Errorf("test error")) + testutils.AwaitState(ctx, t, cc, connectivity.TransientFailure) +} + +// Test verifies that pickfirst attempts to connect to the second backend once +// the happy eyeballs timer expires. +func (s) TestPickFirstLeaf_HappyEyeballs_TriggerConnectionDelay(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + originalTimer := pfinternal.TimeAfterFunc + defer func() { + pfinternal.TimeAfterFunc = originalTimer + }() + triggerTimer, timeAfter := mockTimer() + pfinternal.TimeAfterFunc = timeAfter + + dialer := testutils.NewBlockingDialer() + opts := []grpc.DialOption{ + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, pickfirstleaf.Name)), + grpc.WithContextDialer(dialer.DialContext), + } + cc, rb, bm := setupPickFirstLeaf(t, 2, opts...) + addrs := bm.resolverAddrs() + holds := bm.holds(dialer) + rb.UpdateState(resolver.State{Addresses: addrs}) + cc.Connect() + + testutils.AwaitState(ctx, t, cc, connectivity.Connecting) + + // Verify that only the first server is contacted. + if holds[0].Wait(ctx) != true { + t.Fatalf("Timeout waiting for server %d with address %q to be contacted", 0, addrs[0]) + } + if holds[1].IsStarted() != false { + t.Fatalf("Server %d with address %q contacted unexpectedly", 1, addrs[1]) + } + + // Make the happy eyeballs timer fire once and verify that the + // second server is contacted. + triggerTimer() + if holds[1].Wait(ctx) != true { + t.Fatalf("Timeout waiting for server %d with address %q to be contacted", 1, addrs[1]) + } + + // Get the connection attempt to the second server to succeed and verify + // that the channel becomes READY. + holds[1].Resume() + testutils.AwaitState(ctx, t, cc, connectivity.Ready) +} + +// Test tests the pickfirst balancer by causing a SubConn to fail and then +// jumping to the 3rd SubConn after the happy eyeballs timer expires. +func (s) TestPickFirstLeaf_HappyEyeballs_TF_ThenTimerFires(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + originalTimer := pfinternal.TimeAfterFunc + defer func() { + pfinternal.TimeAfterFunc = originalTimer + }() + triggerTimer, timeAfter := mockTimer() + pfinternal.TimeAfterFunc = timeAfter + + dialer := testutils.NewBlockingDialer() + opts := []grpc.DialOption{ + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, pickfirstleaf.Name)), + grpc.WithContextDialer(dialer.DialContext), + } + cc, rb, bm := setupPickFirstLeaf(t, 3, opts...) + addrs := bm.resolverAddrs() + holds := bm.holds(dialer) + rb.UpdateState(resolver.State{Addresses: addrs}) + cc.Connect() + + testutils.AwaitState(ctx, t, cc, connectivity.Connecting) + + // Verify that only the first server is contacted. + if holds[0].Wait(ctx) != true { + t.Fatalf("Timeout waiting for server %d with address %q to be contacted", 0, addrs[0]) + } + if holds[1].IsStarted() != false { + t.Fatalf("Server %d with address %q contacted unexpectedly", 1, addrs[1]) + } + if holds[2].IsStarted() != false { + t.Fatalf("Server %d with address %q contacted unexpectedly", 2, addrs[2]) + } + + // First SubConn Fails. 
+ holds[0].Fail(fmt.Errorf("test error")) + + // Verify that only the second server is contacted. + if holds[1].Wait(ctx) != true { + t.Fatalf("Timeout waiting for server %d with address %q to be contacted", 1, addrs[1]) + } + if holds[2].IsStarted() != false { + t.Fatalf("Server %d with address %q contacted unexpectedly", 2, addrs[2]) + } + + // The happy eyeballs timer expires, pickfirst should stop waiting for + // server[1] to report a failure/success and request the creation of a third + // SubConn. + triggerTimer() + if holds[2].Wait(ctx) != true { + t.Fatalf("Timeout waiting for server %d with address %q to be contacted", 2, addrs[2]) + } + + // Get the connection attempt to the second server to succeed and verify + // that the channel becomes READY. + holds[1].Resume() + testutils.AwaitState(ctx, t, cc, connectivity.Ready) +} + func (s) TestPickFirstLeaf_InterleavingIPV4Preffered(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -1106,6 +1288,14 @@ func (b *backendManager) resolverAddrs() []resolver.Address { return addrs } +func (b *backendManager) holds(dialer *testutils.BlockingDialer) []*testutils.Hold { + holds := []*testutils.Hold{} + for _, addr := range b.resolverAddrs() { + holds = append(holds, dialer.Hold(addr.Addr)) + } + return holds +} + type ccStateSubscriber struct { transitions []connectivity.State } @@ -1113,3 +1303,26 @@ type ccStateSubscriber struct { func (c *ccStateSubscriber) OnMessage(msg any) { c.transitions = append(c.transitions, msg.(connectivity.State)) } + +// mockTimer returns a fake timeAfterFunc that will not trigger automatically. +// It returns a function that can be called to manually trigger the execution +// of the scheduled callback. +func mockTimer() (triggerFunc func(), timerFunc func(_ time.Duration, f func()) func()) { + timerCh := make(chan struct{}) + triggerFunc = func() { + timerCh <- struct{}{} + } + return triggerFunc, func(_ time.Duration, f func()) func() { + stopCh := make(chan struct{}) + go func() { + select { + case <-timerCh: + f() + case <-stopCh: + } + }() + return sync.OnceFunc(func() { + close(stopCh) + }) + } +} diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf_test.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_test.go index 84b3cb65bed4..71984a238cd5 100644 --- a/balancer/pickfirst/pickfirstleaf/pickfirstleaf_test.go +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf_test.go @@ -73,21 +73,8 @@ func (s) TestAddressList_Iteration(t *testing.T) { } addressList := addressList{} - emptyAddress := resolver.Address{} - if got, want := addressList.first(), emptyAddress; got != want { - t.Fatalf("addressList.first() = %v, want %v", got, want) - } - addressList.updateAddrs(addrs) - if got, want := addressList.first(), addressList.currentAddress(); got != want { - t.Fatalf("addressList.first() = %v, want %v", got, want) - } - - if got, want := addressList.first(), addrs[0]; got != want { - t.Fatalf("addressList.first() = %v, want %v", got, want) - } - for i := 0; i < len(addrs); i++ { if got, want := addressList.isValid(), true; got != want { t.Fatalf("addressList.isValid() = %t, want %t", got, want) From 8338c5d5aa4c17aa9e74312831266884456d5d1e Mon Sep 17 00:00:00 2001 From: Arjan Bal Date: Tue, 12 Nov 2024 22:45:15 +0530 Subject: [PATCH 57/57] Remove go patch version from go.mod --- cmd/protoc-gen-go-grpc/go.mod | 2 +- examples/go.mod | 2 +- gcp/observability/go.mod | 2 +- go.mod | 2 +- interop/observability/go.mod | 2 +- interop/xds/go.mod | 2 +- 
security/advancedtls/examples/go.mod | 2 +- security/advancedtls/go.mod | 2 +- stats/opencensus/go.mod | 2 +- test/tools/go.mod | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/protoc-gen-go-grpc/go.mod b/cmd/protoc-gen-go-grpc/go.mod index 00b37ce12e68..d31ccb766651 100644 --- a/cmd/protoc-gen-go-grpc/go.mod +++ b/cmd/protoc-gen-go-grpc/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/cmd/protoc-gen-go-grpc -go 1.22.7 +go 1.22 require ( google.golang.org/grpc v1.65.0 diff --git a/examples/go.mod b/examples/go.mod index 52451c857af7..ed63e04cf9fd 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/examples -go 1.22.7 +go 1.22 require ( github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod index cd9fdb54e229..f11b9c23569d 100644 --- a/gcp/observability/go.mod +++ b/gcp/observability/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/gcp/observability -go 1.22.7 +go 1.22 require ( cloud.google.com/go/logging v1.12.0 diff --git a/go.mod b/go.mod index 91008eeb7a42..1bbd024d22c1 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc -go 1.22.7 +go 1.22 require ( github.com/cespare/xxhash/v2 v2.3.0 diff --git a/interop/observability/go.mod b/interop/observability/go.mod index eb06c93c99d0..149abb2e900f 100644 --- a/interop/observability/go.mod +++ b/interop/observability/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/interop/observability -go 1.22.7 +go 1.22 require ( google.golang.org/grpc v1.67.1 diff --git a/interop/xds/go.mod b/interop/xds/go.mod index d3fa9e1b1547..5d37a33bce40 100644 --- a/interop/xds/go.mod +++ b/interop/xds/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/interop/xds -go 1.22.7 +go 1.22 replace google.golang.org/grpc => ../.. diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index fed6883ddb16..c7dbca3f301b 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/security/advancedtls/examples -go 1.22.7 +go 1.22 require ( google.golang.org/grpc v1.67.1 diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index 8f678372dc85..a010a986ef3f 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/security/advancedtls -go 1.22.7 +go 1.22 require ( github.com/google/go-cmp v0.6.0 diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod index c2bae551a9c7..919333509fc3 100644 --- a/stats/opencensus/go.mod +++ b/stats/opencensus/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/stats/opencensus -go 1.22.7 +go 1.22 require ( github.com/google/go-cmp v0.6.0 diff --git a/test/tools/go.mod b/test/tools/go.mod index 49c34127fa70..d4581f055447 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -1,6 +1,6 @@ module google.golang.org/grpc/test/tools -go 1.22.7 +go 1.22 require ( github.com/client9/misspell v0.3.4
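An editorial note on the final commit (not part of the patch series): dropping the
patch component from the go directive loosens the toolchain requirement for
downstream modules. As far as I understand the Go 1.21+ toolchain rules, a
directive such as

    go 1.22.7

forces builds onto a go1.22.7-or-newer toolchain (possibly triggering an
automatic toolchain switch for consumers on an older 1.22.x release), whereas

    go 1.22

accepts any go1.22.x toolchain, which is the friendlier requirement for a widely
imported library like grpc-go.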