From 3a594f6a60e4213faeb583c5d85e8cea92c134ad Mon Sep 17 00:00:00 2001 From: Arjan Bal Date: Wed, 25 Dec 2024 00:46:32 +0530 Subject: [PATCH 1/4] Update roundrobin to use endpointsharding --- .../endpointsharding/endpointsharding_test.go | 9 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 3 +- balancer/roundrobin/roundrobin.go | 93 ++++-- balancer/weightedroundrobin/balancer.go | 1 + .../weightedtarget/weightedtarget_test.go | 300 +++++++++++------ internal/balancergroup/balancergroup_test.go | 184 +++++----- internal/testutils/balancer.go | 44 ++- internal/testutils/wrr.go | 3 + test/roundrobin_test.go | 22 +- .../balancer/clusterimpl/balancer_test.go | 101 ++++-- .../clustermanager/clustermanager_test.go | 78 +++-- .../clusterresolver/e2e_test/eds_impl_test.go | 4 +- .../balancer/priority/balancer_test.go | 316 +++++++++++------- .../balancer/ringhash/ringhash_test.go | 1 + 14 files changed, 717 insertions(+), 442 deletions(-) diff --git a/balancer/endpointsharding/endpointsharding_test.go b/balancer/endpointsharding/endpointsharding_test.go index a4f6fe6c6c91..c88ed08fc6b4 100644 --- a/balancer/endpointsharding/endpointsharding_test.go +++ b/balancer/endpointsharding/endpointsharding_test.go @@ -16,7 +16,7 @@ * */ -package endpointsharding +package endpointsharding_test import ( "context" @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/endpointsharding" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" @@ -55,7 +56,7 @@ var logger = grpclog.Component("endpoint-sharding-test") func init() { var err error - gracefulSwitchPickFirst, err = ParseConfig(json.RawMessage(PickFirstConfig)) + gracefulSwitchPickFirst, err = endpointsharding.ParseConfig(json.RawMessage(endpointsharding.PickFirstConfig)) if err != nil { logger.Fatal(err) } @@ -75,7 +76,7 @@ func (fakePetioleBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptio ClientConn: cc, bOpts: opts, } - fp.Balancer = NewBalancer(fp, opts) + fp.Balancer = endpointsharding.NewBalancer(fp, opts) return fp } @@ -105,7 +106,7 @@ func (fp *fakePetiole) UpdateClientConnState(state balancer.ClientConnState) err } func (fp *fakePetiole) UpdateState(state balancer.State) { - childStates := ChildStatesFromPicker(state.Picker) + childStates := endpointsharding.ChildStatesFromPicker(state.Picker) // Both child states should be present in the child picker. States and // picker change over the lifecycle of test, but there should always be two. if len(childStates) != 2 { diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 76fa5fea95f2..1818ad065041 100644 --- a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -618,6 +618,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // Record a connection attempt when exiting CONNECTING. 
if newState.ConnectivityState == connectivity.TransientFailure { sd.connectionFailedInFirstPass = true + sd.lastErr = newState.ConnectionError connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) } @@ -702,7 +703,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub }) } case connectivity.TransientFailure: - sd.lastErr = newState.ConnectionError sd.effectiveState = connectivity.TransientFailure // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from @@ -728,7 +728,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub switch newState.ConnectivityState { case connectivity.TransientFailure: b.numTF = (b.numTF + 1) % b.subConns.Len() - sd.lastErr = newState.ConnectionError if b.numTF%b.subConns.Len() == 0 { b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, diff --git a/balancer/roundrobin/roundrobin.go b/balancer/roundrobin/roundrobin.go index 80a42d22510c..2ee840ceff15 100644 --- a/balancer/roundrobin/roundrobin.go +++ b/balancer/roundrobin/roundrobin.go @@ -22,60 +22,81 @@ package roundrobin import ( - rand "math/rand/v2" - "sync/atomic" + "encoding/json" + "fmt" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/endpointsharding" + "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/serviceconfig" ) // Name is the name of round_robin balancer. const Name = "round_robin" -var logger = grpclog.Component("roundrobin") - -// newBuilder creates a new roundrobin balancer builder. -func newBuilder() balancer.Builder { - return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) -} +var ( + logger = grpclog.Component("roundrobin") + // endpointSharding which specifies pick first children. + endpointShardingLBConfig serviceconfig.LoadBalancingConfig +) func init() { - balancer.Register(newBuilder()) + var err error + endpointShardingLBConfig, err = endpointsharding.ParseConfig(json.RawMessage(endpointsharding.PickFirstConfig)) + if err != nil { + logger.Fatal(err) + } + balancer.Register(builder{}) } -type rrPickerBuilder struct{} +type builder struct{} -func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: Build called with info: %v", info) - if len(info.ReadySCs) == 0 { - return base.NewErrPicker(balancer.ErrNoSubConnAvailable) - } - scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) - for sc := range info.ReadySCs { - scs = append(scs, sc) +func (bb builder) Name() string { + return Name +} + +func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bal := &rrBalancer{ + cc: cc, + child: endpointsharding.NewBalancer(cc, opts), } - return &rrPicker{ - subConns: scs, - // Start at a random index, as the same RR balancer rebuilds a new - // picker when SubConn states change, and we don't want to apply excess - // load to the first server in the list. 
- next: uint32(rand.IntN(len(scs))), + bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal)) + bal.logger.Infof("Created") + return bal +} + +type rrBalancer struct { + cc balancer.ClientConn + child balancer.Balancer + logger *internalgrpclog.PrefixLogger +} + +func (b *rrBalancer) Close() { + b.child.Close() +} + +func (b *rrBalancer) ExitIdle() { + // Should always be ok, as child is endpoint sharding. + if ei, ok := b.child.(balancer.ExitIdler); ok { + ei.ExitIdle() } } -type rrPicker struct { - // subConns is the snapshot of the roundrobin balancer when this picker was - // created. The slice is immutable. Each Get() will do a round robin - // selection from it and return the selected SubConn. - subConns []balancer.SubConn - next uint32 +func (b *rrBalancer) ResolverError(err error) { + // Will cause inline picker update from endpoint sharding. + b.child.ResolverError(err) } -func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - subConnsLen := uint32(len(p.subConns)) - nextIndex := atomic.AddUint32(&p.next, 1) +func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + // Enable the health listener in pickfirst children for client side health + // checks and outlier detection, if configured. + ccs.ResolverState = pickfirstleaf.EnableHealthListener(ccs.ResolverState) + ccs.BalancerConfig = endpointShardingLBConfig + return b.child.UpdateClientConnState(ccs) +} - sc := p.subConns[nextIndex%subConnsLen] - return balancer.PickResult{SubConn: sc}, nil +func (b *rrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } diff --git a/balancer/weightedroundrobin/balancer.go b/balancer/weightedroundrobin/balancer.go index d7b9dc4666ee..136f6c56e575 100644 --- a/balancer/weightedroundrobin/balancer.go +++ b/balancer/weightedroundrobin/balancer.go @@ -405,6 +405,7 @@ func (b *wrrBalancer) Close() { ew.stopORCAListener() } } + b.child.Close() } func (b *wrrBalancer) ExitIdle() { diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go index 5251ed6c1049..3edabbefe504 100644 --- a/balancer/weightedtarget/weightedtarget_test.go +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/hierarchy" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/stats" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -129,12 +130,14 @@ func (b *testConfigBalancer) UpdateClientConnState(s balancer.ClientConnState) e return fmt.Errorf("unexpected balancer config with type %T", s.BalancerConfig) } - addrsWithAttr := make([]resolver.Address, len(s.ResolverState.Addresses)) - for i, addr := range s.ResolverState.Addresses { - addrsWithAttr[i] = setConfigKey(addr, c.configStr) + for i, ep := range s.ResolverState.Endpoints { + addrsWithAttr := make([]resolver.Address, len(ep.Addresses)) + for j, addr := range ep.Addresses { + addrsWithAttr[j] = setConfigKey(addr, c.configStr) + } + s.ResolverState.Endpoints[i].Addresses = addrsWithAttr } s.BalancerConfig = nil - s.ResolverState.Addresses = addrsWithAttr return b.Balancer.UpdateClientConnState(s) } @@ -168,7 +171,9 @@ func init() { // which should trigger a transient failure state update. 
func (s) TestWeightedTarget(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() // Start with "cluster_1: round_robin". @@ -188,7 +193,9 @@ func (s) TestWeightedTarget(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. addr1 := resolver.Address{Addr: testBackendAddrStrs[1], Attributes: nil} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr1, []string{"cluster_1"})}}, + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + }}, BalancerConfig: config1, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) @@ -229,7 +236,9 @@ func (s) TestWeightedTarget(t *testing.T) { // Send the config, and one address with hierarchy path "cluster_2". addr2 := resolver.Address{Addr: testBackendAddrStrs[2], Attributes: nil} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr2, []string{"cluster_2"})}}, + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_2"}), + }}, BalancerConfig: config2, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) @@ -277,17 +286,25 @@ func (s) TestWeightedTarget(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_2"]. addr3 := resolver.Address{Addr: testBackendAddrStrs[3], Attributes: nil} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr3, []string{"cluster_2"})}}, + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}), + }}, BalancerConfig: config3, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } verifyAddressInNewSubConn(t, cc, addr3) + // The same SubConn is closed by balancergroup, gracefulswitch and + // pickfirstleaf when they are closed. Remove duplicate events. + initialSC := scShutdown + for scShutdown == initialSC { + scShutdown = <-cc.ShutdownSubConnCh + } + // The subconn from the test_config_balancer should be shut down. - scShutdown = <-cc.ShutdownSubConnCh if scShutdown != sc2 { - t.Fatalf("ShutdownSubConn, want %v, got %v", sc1, scShutdown) + t.Fatalf("ShutdownSubConn, want %v, got %v", sc2, scShutdown) } scShutdown.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) @@ -330,7 +347,9 @@ func (s) TestWeightedTarget(t *testing.T) { // backends from the subBalancer. func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() // Start with "cluster_1: round_robin". @@ -350,7 +369,9 @@ func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. 
addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr1, []string{"cluster_1"})}}, + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) @@ -375,9 +396,9 @@ func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) { // Send two addresses. addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_1"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}), }}, BalancerConfig: config, }); err != nil { @@ -401,7 +422,9 @@ func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) { // Remove the first address. if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr2, []string{"cluster_1"})}}, + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}), + }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) @@ -428,7 +451,9 @@ func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) { // weighted target balancer with two sub-balancers, each with one backend. func (s) TestWeightedTarget_TwoSubBalancers_OneBackend(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() // Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer". 
@@ -453,16 +478,18 @@ func (s) TestWeightedTarget_TwoSubBalancers_OneBackend(t *testing.T) { addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_2"}), }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } - scs := waitForNewSubConns(t, cc, 2) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + scs := waitForNewSubConns(ctx, t, cc, 2) verifySubConnAddrs(t, scs, map[string][]resolver.Address{ "cluster_1": {addr1}, "cluster_2": {addr2}, @@ -472,13 +499,14 @@ func (s) TestWeightedTarget_TwoSubBalancers_OneBackend(t *testing.T) { sc1 := scs["cluster_1"][0].sc.(*testutils.TestSubConn) sc2 := scs["cluster_2"][0].sc.(*testutils.TestSubConn) + // The CONNECTING picker should be sent by all leaf pickfirst policies on + // receiving the first resolver update. + <-cc.NewPickerCh // Send state changes for both SubConns, and wait for the picker. sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) p := <-cc.NewPickerCh @@ -494,7 +522,9 @@ func (s) TestWeightedTarget_TwoSubBalancers_OneBackend(t *testing.T) { // backend. func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() // Start with "cluster_1: round_robin, cluster_2: round_robin". 
@@ -521,18 +551,20 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} addr4 := resolver.Address{Addr: testBackendAddrStrs[4]} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_1"}), - hierarchy.Set(addr3, []string{"cluster_2"}), - hierarchy.Set(addr4, []string{"cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}), }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } - scs := waitForNewSubConns(t, cc, 4) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + scs := waitForNewSubConns(ctx, t, cc, 4) verifySubConnAddrs(t, scs, map[string][]resolver.Address{ "cluster_1": {addr1, addr2}, "cluster_2": {addr3, addr4}, @@ -544,21 +576,21 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { sc3 := scs["cluster_2"][0].sc.(*testutils.TestSubConn) sc4 := scs["cluster_2"][1].sc.(*testutils.TestSubConn) + // The CONNECTING picker should be sent by all leaf pickfirst policies on + // receiving the first resolver update. + <-cc.NewPickerCh + // Send state changes for all SubConns, and wait for the picker. sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) p := <-cc.NewPickerCh @@ -570,7 +602,13 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { } // Turn sc2's connection down, should be RR between balancers. - sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + wantSubConnErr := errors.New("subConn connection error") + sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle}) + sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + sc2.UpdateState(balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: wantSubConnErr, + }) p = <-cc.NewPickerCh want = []balancer.SubConn{sc1, sc1, sc3, sc4} if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { @@ -579,10 +617,10 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { // Shut down subConn corresponding to addr3. 
if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_1"}), - hierarchy.Set(addr4, []string{"cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}), }}, BalancerConfig: config, }); err != nil { @@ -600,7 +638,8 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { } // Turn sc1's connection down. - wantSubConnErr := errors.New("subConn connection error") + sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle}) + sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc1.UpdateState(balancer.SubConnState{ ConnectivityState: connectivity.TransientFailure, ConnectionError: wantSubConnErr, @@ -612,6 +651,7 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { } // Turn last connection to connecting. + sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle}) sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) p = <-cc.NewPickerCh for i := 0; i < 5; i++ { @@ -626,8 +666,6 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { ConnectionError: wantSubConnErr, }) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() if err := cc.WaitForPicker(ctx, pickAndCheckError(wantSubConnErr)); err != nil { t.Fatal(err) } @@ -638,7 +676,9 @@ func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { // differing weights. func (s) TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() // Start with two subBalancers, one with twice the weight of the other. 
@@ -665,18 +705,20 @@ func (s) TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends(t *test addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} addr4 := resolver.Address{Addr: testBackendAddrStrs[4]} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_1"}), - hierarchy.Set(addr3, []string{"cluster_2"}), - hierarchy.Set(addr4, []string{"cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}), }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } - scs := waitForNewSubConns(t, cc, 4) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + scs := waitForNewSubConns(ctx, t, cc, 4) verifySubConnAddrs(t, scs, map[string][]resolver.Address{ "cluster_1": {addr1, addr2}, "cluster_2": {addr3, addr4}, @@ -688,21 +730,21 @@ func (s) TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends(t *test sc3 := scs["cluster_2"][0].sc.(*testutils.TestSubConn) sc4 := scs["cluster_2"][1].sc.(*testutils.TestSubConn) + // The CONNECTING picker should be sent by all leaf pickfirst policies on + // receiving the first resolver update. + <-cc.NewPickerCh + // Send state changes for all SubConns, and wait for the picker. sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) p := <-cc.NewPickerCh @@ -719,7 +761,9 @@ func (s) TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends(t *test // the subBalancers. func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() // Start with two subBalancers, one with twice the weight of the other. 
@@ -749,17 +793,19 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_2"}), - hierarchy.Set(addr3, []string{"cluster_3"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_2"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_3"}), }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } - scs := waitForNewSubConns(t, cc, 3) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + scs := waitForNewSubConns(ctx, t, cc, 3) verifySubConnAddrs(t, scs, map[string][]resolver.Address{ "cluster_1": {addr1}, "cluster_2": {addr2}, @@ -772,16 +818,16 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { sc3 := scs["cluster_3"][0].sc.(*testutils.TestSubConn) // Send state changes for all SubConns, and wait for the picker. - sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // The CONNECTING picker should be sent by all leaf pickfirst policies on + // receiving the first resolver update. <-cc.NewPickerCh + sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) p := <-cc.NewPickerCh @@ -808,9 +854,9 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { t.Fatalf("failed to parse balancer config: %v", err) } if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr3, []string{"cluster_3"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_3"}), }}, BalancerConfig: config, }); err != nil { @@ -852,14 +898,20 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { t.Fatalf("failed to parse balancer config: %v", err) } if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr3, []string{"cluster_3"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_3"}), }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } 
+ // The same SubConn is closed by balancergroup, gracefulswitch and + // pickfirstleaf when they are closed. Remove duplicate events. + initialSC := scShutdown + for scShutdown == initialSC { + scShutdown = <-cc.ShutdownSubConnCh + } // Removing a subBalancer causes the weighted target LB policy to push a new // picker which ensures that the removed subBalancer is not picked for RPCs. @@ -868,8 +920,6 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { t.Fatalf("ShutdownSubConn, want %v, got %v", sc1, scShutdown) } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() if err := cc.WaitForPicker(ctx, pickAndCheckError(wantSubConnErr)); err != nil { t.Fatal(err) } @@ -880,7 +930,9 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { // change the weight of these subBalancers. func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() // Start with two subBalancers, one with twice the weight of the other. @@ -907,18 +959,20 @@ func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} addr4 := resolver.Address{Addr: testBackendAddrStrs[4]} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_1"}), - hierarchy.Set(addr3, []string{"cluster_2"}), - hierarchy.Set(addr4, []string{"cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}), }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } - scs := waitForNewSubConns(t, cc, 4) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + scs := waitForNewSubConns(ctx, t, cc, 4) verifySubConnAddrs(t, scs, map[string][]resolver.Address{ "cluster_1": {addr1, addr2}, "cluster_2": {addr3, addr4}, @@ -930,21 +984,21 @@ func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing sc3 := scs["cluster_2"][0].sc.(*testutils.TestSubConn) sc4 := scs["cluster_2"][1].sc.(*testutils.TestSubConn) + // The CONNECTING picker should be sent by all leaf pickfirst policies on + // receiving the first resolver update. + <-cc.NewPickerCh + // Send state changes for all SubConns, and wait for the picker. 
sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - <-cc.NewPickerCh sc4.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) p := <-cc.NewPickerCh @@ -973,11 +1027,11 @@ func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing t.Fatalf("failed to parse balancer config: %v", err) } if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_1"}), - hierarchy.Set(addr3, []string{"cluster_2"}), - hierarchy.Set(addr4, []string{"cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr3}}, []string{"cluster_2"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr4}}, []string{"cluster_2"}), }}, BalancerConfig: config, }); err != nil { @@ -998,7 +1052,9 @@ func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing // other sub-balancer. func (s) TestWeightedTarget_InitOneSubBalancerTransientFailure(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() // Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer". @@ -1023,16 +1079,18 @@ func (s) TestWeightedTarget_InitOneSubBalancerTransientFailure(t *testing.T) { addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr1}}, []string{"cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr2}}, []string{"cluster_2"}), }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } - scs := waitForNewSubConns(t, cc, 2) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + scs := waitForNewSubConns(ctx, t, cc, 2) verifySubConnAddrs(t, scs, map[string][]resolver.Address{ "cluster_1": {addr1}, "cluster_2": {addr2}, @@ -1060,7 +1118,9 @@ func (s) TestWeightedTarget_InitOneSubBalancerTransientFailure(t *testing.T) { // return transient failure error. 
func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() // Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer". @@ -1084,17 +1144,21 @@ func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *tes // Send the config with one address for each cluster. addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + ep1 := resolver.Endpoint{Addresses: []resolver.Address{addr1}} + ep2 := resolver.Endpoint{Addresses: []resolver.Address{addr2}} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(addr1, []string{"cluster_1"}), - hierarchy.Set(addr2, []string{"cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(ep1, []string{"cluster_1"}), + hierarchy.SetInEndpoint(ep2, []string{"cluster_2"}), }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } - scs := waitForNewSubConns(t, cc, 2) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + scs := waitForNewSubConns(ctx, t, cc, 2) verifySubConnAddrs(t, scs, map[string][]resolver.Address{ "cluster_1": {addr1}, "cluster_2": {addr2}, @@ -1139,13 +1203,13 @@ func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *tes } } -// Verify that a SubConn is created with the expected address and hierarchy -// path cleared. +// Verify that a SubConn is created with the expected address. func verifyAddressInNewSubConn(t *testing.T, cc *testutils.BalancerClientConn, addr resolver.Address) { t.Helper() gotAddr := <-cc.NewSubConnAddrsCh - wantAddr := []resolver.Address{hierarchy.Set(addr, []string{})} + wantAddr := []resolver.Address{addr} + gotAddr[0].BalancerAttributes = nil if diff := cmp.Diff(gotAddr, wantAddr, cmp.AllowUnexported(attributes.Attributes{})); diff != "" { t.Fatalf("got unexpected new subconn addrs: %v", diff) } @@ -1163,12 +1227,17 @@ type subConnWithAddr struct { // // Returned value is a map from subBalancer (identified by its config) to // subConns created by it. 
-func waitForNewSubConns(t *testing.T, cc *testutils.BalancerClientConn, num int) map[string][]subConnWithAddr { +func waitForNewSubConns(ctx context.Context, t *testing.T, cc *testutils.BalancerClientConn, num int) map[string][]subConnWithAddr { t.Helper() scs := make(map[string][]subConnWithAddr) for i := 0; i < num; i++ { - addrs := <-cc.NewSubConnAddrsCh + var addrs []resolver.Address + select { + case <-ctx.Done(): + t.Fatalf("Timed out waiting for addresses for new SubConn.") + case addrs = <-cc.NewSubConnAddrsCh: + } if len(addrs) != 1 { t.Fatalf("received subConns with %d addresses, want 1", len(addrs)) } @@ -1176,7 +1245,12 @@ func waitForNewSubConns(t *testing.T, cc *testutils.BalancerClientConn, num int) if !ok { t.Fatalf("received subConn address %v contains no attribute for balancer config", addrs[0]) } - sc := <-cc.NewSubConnCh + var sc balancer.SubConn + select { + case <-ctx.Done(): + t.Fatalf("Timed out waiting for new SubConn.") + case sc = <-cc.NewSubConnCh: + } scWithAddr := subConnWithAddr{sc: sc, addr: addrs[0]} scs[cfg] = append(scs[cfg], scWithAddr) } @@ -1234,7 +1308,9 @@ func init() { // state will be Idle. func (s) TestInitialIdle(t *testing.T) { cc := testutils.NewBalancerClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() config, err := wtbParser.ParseConfig([]byte(` @@ -1253,7 +1329,9 @@ func (s) TestInitialIdle(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. addrs := []resolver.Address{{Addr: testBackendAddrStrs[0], Attributes: nil}} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addrs[0], []string{"cds:cluster_1"})}}, + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addrs[0]}}, []string{"cds:cluster_1"}), + }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) @@ -1276,7 +1354,9 @@ func (s) TestInitialIdle(t *testing.T) { func (s) TestIgnoreSubBalancerStateTransitions(t *testing.T) { cc := &tcc{BalancerClientConn: testutils.NewBalancerClientConn(t)} - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() config, err := wtbParser.ParseConfig([]byte(` @@ -1295,7 +1375,9 @@ func (s) TestIgnoreSubBalancerStateTransitions(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. 
addr := resolver.Address{Addr: testBackendAddrStrs[0], Attributes: nil} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr, []string{"cluster_1"})}}, + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addr}}, []string{"cluster_1"}), + }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) @@ -1335,7 +1417,9 @@ func (s) TestUpdateStatePauses(t *testing.T) { } stub.Register("update_state_balancer", balFuncs) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer wtb.Close() config, err := wtbParser.ParseConfig([]byte(` @@ -1354,7 +1438,9 @@ func (s) TestUpdateStatePauses(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. addrs := []resolver.Address{{Addr: testBackendAddrStrs[0], Attributes: nil}} if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addrs[0], []string{"cds:cluster_1"})}}, + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{addrs[0]}}, []string{"cds:cluster_1"}), + }}, BalancerConfig: config, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index e49e8135a1b7..158b5bb3b993 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -19,6 +19,7 @@ package balancergroup import ( "context" "encoding/json" + "errors" "fmt" "testing" "time" @@ -43,16 +44,19 @@ const ( ) var ( - rrBuilder = balancer.Get(roundrobin.Name) - testBalancerIDs = []string{"b1", "b2", "b3"} - testBackendAddrs []resolver.Address + rrBuilder = balancer.Get(roundrobin.Name) + testBalancerIDs = []string{"b1", "b2", "b3"} + testBackendAddrs []resolver.Address + testBackendEndpoints []resolver.Endpoint ) const testBackendAddrsCount = 12 func init() { for i := 0; i < testBackendAddrsCount; i++ { - testBackendAddrs = append(testBackendAddrs, resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)}) + addr := resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)} + testBackendAddrs = append(testBackendAddrs, addr) + testBackendEndpoints = append(testBackendEndpoints, resolver.Endpoint{Addresses: []resolver.Address{addr}}) } } @@ -78,8 +82,10 @@ func (s) TestBalancerGroup_start_close(t *testing.T) { gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) gator.Start() bg := New(Options{ - CC: cc, - BuildOpts: balancer.BuildOptions{}, + CC: cc, + BuildOpts: balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }, StateAggregator: gator, Logger: nil, SubBalancerCloseTimeout: time.Duration(0), @@ -89,28 +95,29 @@ func (s) TestBalancerGroup_start_close(t *testing.T) { // balancers. 
gator.Add(testBalancerIDs[0], 2) bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Endpoints: testBackendEndpoints[0:2]}}) gator.Add(testBalancerIDs[1], 1) bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Endpoints: testBackendEndpoints[2:4]}}) bg.Start() - m1 := make(map[resolver.Address]balancer.SubConn) + m1 := make(map[string]balancer.SubConn) for i := 0; i < 4; i++ { addrs := <-cc.NewSubConnAddrsCh sc := <-cc.NewSubConnCh - m1[addrs[0]] = sc + m1[addrs[0].Addr] = sc sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc.HealthUpdateDelivered.Done() } // Test roundrobin on the last picker. p1 := <-cc.NewPickerCh want := []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], - m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], - m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], + m1[testBackendAddrs[0].Addr], m1[testBackendAddrs[0].Addr], + m1[testBackendAddrs[1].Addr], m1[testBackendAddrs[1].Addr], + m1[testBackendAddrs[2].Addr], m1[testBackendAddrs[3].Addr], } if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) @@ -125,7 +132,7 @@ func (s) TestBalancerGroup_start_close(t *testing.T) { // Add b3, weight 1, backends [1,2]. gator.Add(testBalancerIDs[2], 1) bg.Add(testBalancerIDs[2], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:3]}}) + bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Endpoints: testBackendEndpoints[1:3]}}) // Remove b1. gator.Remove(testBalancerIDs[0]) @@ -133,26 +140,27 @@ func (s) TestBalancerGroup_start_close(t *testing.T) { // Update b2 to weight 3, backends [0,3]. gator.UpdateWeight(testBalancerIDs[1], 3) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: append([]resolver.Address(nil), testBackendAddrs[0], testBackendAddrs[3])}}) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Endpoints: append([]resolver.Endpoint(nil), testBackendEndpoints[0], testBackendEndpoints[3])}}) gator.Start() bg.Start() - m2 := make(map[resolver.Address]balancer.SubConn) + m2 := make(map[string]balancer.SubConn) for i := 0; i < 4; i++ { addrs := <-cc.NewSubConnAddrsCh sc := <-cc.NewSubConnCh - m2[addrs[0]] = sc + m2[addrs[0].Addr] = sc sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc.HealthUpdateDelivered.Done() } // Test roundrobin on the last picker. 
p2 := <-cc.NewPickerCh want = []balancer.SubConn{ - m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], - m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], - m2[testBackendAddrs[1]], m2[testBackendAddrs[2]], + m2[testBackendAddrs[0].Addr], m2[testBackendAddrs[0].Addr], m2[testBackendAddrs[0].Addr], + m2[testBackendAddrs[3].Addr], m2[testBackendAddrs[3].Addr], m2[testBackendAddrs[3].Addr], + m2[testBackendAddrs[1].Addr], m2[testBackendAddrs[2].Addr], } if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) @@ -181,8 +189,10 @@ func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) { gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) gator.Start() bg := New(Options{ - CC: cc, - BuildOpts: balancer.BuildOptions{}, + CC: cc, + BuildOpts: balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }, StateAggregator: gator, Logger: nil, SubBalancerCloseTimeout: time.Duration(0), @@ -204,13 +214,15 @@ func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) { // Two rr balancers are added to bg, each with 2 ready subConns. A sub-balancer // is removed later, so the balancer group returned has one sub-balancer in its // own map, and one sub-balancer in cache. -func initBalancerGroupForCachingTest(t *testing.T, idleCacheTimeout time.Duration) (*weightedaggregator.Aggregator, *BalancerGroup, *testutils.BalancerClientConn, map[resolver.Address]*testutils.TestSubConn) { +func initBalancerGroupForCachingTest(t *testing.T, idleCacheTimeout time.Duration) (*weightedaggregator.Aggregator, *BalancerGroup, *testutils.BalancerClientConn, map[string]*testutils.TestSubConn) { cc := testutils.NewBalancerClientConn(t) gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) gator.Start() bg := New(Options{ - CC: cc, - BuildOpts: balancer.BuildOptions{}, + CC: cc, + BuildOpts: balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }, StateAggregator: gator, Logger: nil, SubBalancerCloseTimeout: idleCacheTimeout, @@ -220,28 +232,29 @@ func initBalancerGroupForCachingTest(t *testing.T, idleCacheTimeout time.Duratio // balancers. gator.Add(testBalancerIDs[0], 2) bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Endpoints: testBackendEndpoints[0:2]}}) gator.Add(testBalancerIDs[1], 1) bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Endpoints: testBackendEndpoints[2:4]}}) bg.Start() - m1 := make(map[resolver.Address]*testutils.TestSubConn) + m1 := make(map[string]*testutils.TestSubConn) for i := 0; i < 4; i++ { addrs := <-cc.NewSubConnAddrsCh sc := <-cc.NewSubConnCh - m1[addrs[0]] = sc + m1[addrs[0].Addr] = sc sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc.HealthUpdateDelivered.Done() } // Test roundrobin on the last picker. 
p1 := <-cc.NewPickerCh want := []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], - m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], - m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], + m1[testBackendAddrs[0].Addr], m1[testBackendAddrs[0].Addr], + m1[testBackendAddrs[1].Addr], m1[testBackendAddrs[1].Addr], + m1[testBackendAddrs[2].Addr], m1[testBackendAddrs[3].Addr], } if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil { t.Fatalf("want %v, got %v", want, err) @@ -262,7 +275,7 @@ func initBalancerGroupForCachingTest(t *testing.T, idleCacheTimeout time.Duratio // Test roundrobin on the with only sub-balancer0. p2 := <-cc.NewPickerCh want = []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], + m1[testBackendAddrs[0].Addr], m1[testBackendAddrs[1].Addr], } if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p2)); err != nil { t.Fatalf("want %v, got %v", want, err) @@ -278,7 +291,10 @@ func (s) TestBalancerGroup_locality_caching(t *testing.T) { // Turn down subconn for addr2, shouldn't get picker update because // sub-balancer1 was removed. - addrToSC[testBackendAddrs[2]].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + addrToSC[testBackendAddrs[2].Addr].UpdateState(balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: errors.New("test error"), + }) for i := 0; i < 10; i++ { select { case <-cc.NewPickerCh: @@ -296,10 +312,10 @@ func (s) TestBalancerGroup_locality_caching(t *testing.T) { p3 := <-cc.NewPickerCh want := []balancer.SubConn{ - addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], - addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], + addrToSC[testBackendAddrs[0].Addr], addrToSC[testBackendAddrs[0].Addr], + addrToSC[testBackendAddrs[1].Addr], addrToSC[testBackendAddrs[1].Addr], // addr2 is down, b2 only has addr3 in READY state. - addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]], + addrToSC[testBackendAddrs[3].Addr], addrToSC[testBackendAddrs[3].Addr], } if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) @@ -325,10 +341,10 @@ func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) { // The balancer group is closed. The subconns should be shutdown immediately. shutdownTimeout := time.After(time.Millisecond * 500) scToShutdown := map[balancer.SubConn]int{ - addrToSC[testBackendAddrs[0]]: 1, - addrToSC[testBackendAddrs[1]]: 1, - addrToSC[testBackendAddrs[2]]: 1, - addrToSC[testBackendAddrs[3]]: 1, + addrToSC[testBackendAddrs[0].Addr]: 1, + addrToSC[testBackendAddrs[1].Addr]: 1, + addrToSC[testBackendAddrs[2].Addr]: 1, + addrToSC[testBackendAddrs[3].Addr]: 1, } for i := 0; i < len(scToShutdown); i++ { select { @@ -353,11 +369,13 @@ func (s) TestBalancerGroup_locality_caching_not_read_within_timeout(t *testing.T // shut down. ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() + // The same SubConn is closed by balancergroup, gracefulswitch and + // pickfirstleaf when they are closed. 
scToShutdown := map[balancer.SubConn]int{ - addrToSC[testBackendAddrs[2]]: 1, - addrToSC[testBackendAddrs[3]]: 1, + addrToSC[testBackendAddrs[2].Addr]: 3, + addrToSC[testBackendAddrs[3].Addr]: 3, } - for i := 0; i < len(scToShutdown); i++ { + for i := 0; i < len(scToShutdown)*3; i++ { select { case sc := <-cc.ShutdownSubConnCh: c := scToShutdown[sc] @@ -399,11 +417,13 @@ func (s) TestBalancerGroup_locality_caching_read_with_different_builder(t *testi // The cached sub-balancer should be closed, and the subconns should be // shut down immediately. shutdownTimeout := time.After(time.Millisecond * 500) + // The same SubConn is closed by balancergroup, gracefulswitch and + // pickfirstleaf when they are closed. scToShutdown := map[balancer.SubConn]int{ - addrToSC[testBackendAddrs[2]]: 1, - addrToSC[testBackendAddrs[3]]: 1, + addrToSC[testBackendAddrs[2].Addr]: 3, + addrToSC[testBackendAddrs[3].Addr]: 3, } - for i := 0; i < len(scToShutdown); i++ { + for i := 0; i < len(scToShutdown)*3; i++ { select { case sc := <-cc.ShutdownSubConnCh: c := scToShutdown[sc] @@ -416,25 +436,26 @@ func (s) TestBalancerGroup_locality_caching_read_with_different_builder(t *testi } } - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[4:6]}}) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Endpoints: testBackendEndpoints[4:6]}}) newSCTimeout := time.After(time.Millisecond * 500) - scToAdd := map[resolver.Address]int{ - testBackendAddrs[4]: 1, - testBackendAddrs[5]: 1, + scToAdd := map[string]int{ + testBackendAddrs[4].Addr: 1, + testBackendAddrs[5].Addr: 1, } for i := 0; i < len(scToAdd); i++ { select { case addr := <-cc.NewSubConnAddrsCh: - c := scToAdd[addr[0]] + c := scToAdd[addr[0].Addr] if c == 0 { t.Fatalf("Got newSubConn for %v when there's %d new expected", addr, c) } - scToAdd[addr[0]] = c - 1 + scToAdd[addr[0].Addr] = c - 1 sc := <-cc.NewSubConnCh - addrToSC[addr[0]] = sc + addrToSC[addr[0].Addr] = sc sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc.HealthUpdateDelivered.Done() case <-newSCTimeout: t.Fatalf("timeout waiting for subConns (from new sub-balancer) to be newed") } @@ -443,9 +464,9 @@ func (s) TestBalancerGroup_locality_caching_read_with_different_builder(t *testi // Test roundrobin on the new picker. 
p3 := <-cc.NewPickerCh want := []balancer.SubConn{ - addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], - addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], - addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]], + addrToSC[testBackendAddrs[0].Addr], addrToSC[testBackendAddrs[0].Addr], + addrToSC[testBackendAddrs[1].Addr], addrToSC[testBackendAddrs[1].Addr], + addrToSC[testBackendAddrs[4].Addr], addrToSC[testBackendAddrs[5].Addr], } if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p3)); err != nil { t.Fatalf("want %v, got %v", want, err) @@ -495,6 +516,7 @@ func (s) TestBalancerGroupBuildOptions(t *testing.T) { DialCreds: insecure.NewCredentials(), ChannelzParent: channelz.RegisterChannel(nil, "test channel"), CustomUserAgent: userAgent, + MetricsRecorder: &stats.NoopMetricsRecorder{}, } stub.Register(balancerName, stub.BalancerFuncs{ UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { @@ -534,8 +556,10 @@ func (s) TestBalancerExitIdleOne(t *testing.T) { }) cc := testutils.NewBalancerClientConn(t) bg := New(Options{ - CC: cc, - BuildOpts: balancer.BuildOptions{}, + CC: cc, + BuildOpts: balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }, StateAggregator: nil, Logger: nil, }) @@ -566,32 +590,35 @@ func (s) TestBalancerGracefulSwitch(t *testing.T) { gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) gator.Start() bg := New(Options{ - CC: cc, - BuildOpts: balancer.BuildOptions{}, + CC: cc, + BuildOpts: balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }, StateAggregator: gator, Logger: nil, }) gator.Add(testBalancerIDs[0], 1) bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Endpoints: testBackendEndpoints[0:2]}}) bg.Start() defer bg.Close() - m1 := make(map[resolver.Address]balancer.SubConn) + m1 := make(map[string]balancer.SubConn) scs := make(map[balancer.SubConn]bool) for i := 0; i < 2; i++ { addrs := <-cc.NewSubConnAddrsCh sc := <-cc.NewSubConnCh - m1[addrs[0]] = sc + m1[addrs[0].Addr] = sc scs[sc] = true sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc.HealthUpdateDelivered.Done() } p1 := <-cc.NewPickerCh want := []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], + m1[testBackendAddrs[0].Addr], m1[testBackendAddrs[1].Addr], } if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil { t.Fatal(err) @@ -611,7 +638,7 @@ func (s) TestBalancerGracefulSwitch(t *testing.T) { bd.Data.(balancer.Balancer).Close() }, UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { - ccs.ResolverState.Addresses = ccs.ResolverState.Addresses[1:] + ccs.ResolverState.Endpoints = ccs.ResolverState.Endpoints[1:] bal := bd.Data.(balancer.Balancer) return bal.UpdateClientConnState(ccs) }, @@ -622,7 +649,7 @@ func (s) TestBalancerGracefulSwitch(t *testing.T) { t.Fatalf("ParseConfig(%s) failed: %v", string(cfgJSON), err) } if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}, + ResolverState: resolver.State{Endpoints: testBackendEndpoints[2:4]}, BalancerConfig: 
lbCfg,
 	}); err != nil {
 		t.Fatalf("error updating ClientConn state: %v", err)
@@ -667,19 +694,22 @@ func (s) TestBalancerGracefulSwitch(t *testing.T) {
 	// SubConns for the balancer being gracefully switched from to get deleted.
 	ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout)
 	defer cancel()
-	for i := 0; i < 2; i++ {
+	// The same SubConn is closed by balancergroup, gracefulswitch and
+	// pickfirstleaf when they are closed.
+	scToShutdown := map[balancer.SubConn]int{
+		m1[testBackendAddrs[0].Addr]: 3,
+		m1[testBackendAddrs[1].Addr]: 3,
+	}
+	for i := 0; i < len(scToShutdown)*3; i++ {
 		select {
-		case <-ctx.Done():
-			t.Fatalf("error waiting for Shutdown()")
 		case sc := <-cc.ShutdownSubConnCh:
-			// The SubConn shut down should have been one of the two created
-			// SubConns, and both should be deleted.
-			if ok := scs[sc]; ok {
-				delete(scs, sc)
-				continue
-			} else {
-				t.Fatalf("Shutdown called for wrong SubConn %v, want in %v", sc, scs)
+			c := scToShutdown[sc]
+			if c == 0 {
+				t.Fatalf("Got Shutdown for %v when there's %d shutdown expected", sc, c)
 			}
+			scToShutdown[sc] = c - 1
+		case <-ctx.Done():
+			t.Fatalf("timeout waiting for subConns to be shut down")
 		}
 	}
 }
diff --git a/internal/testutils/balancer.go b/internal/testutils/balancer.go
index 5a446b147136..5a13150b1d84 100644
--- a/internal/testutils/balancer.go
+++ b/internal/testutils/balancer.go
@@ -33,12 +33,13 @@ import (
 // TestSubConn implements the SubConn interface, to be used in tests.
 type TestSubConn struct {
 	balancer.SubConn
-	tcc           *BalancerClientConn // the CC that owns this SubConn
-	id            string
-	ConnectCh     chan struct{}
-	stateListener func(balancer.SubConnState)
-	connectCalled *grpcsync.Event
-	Addresses     []resolver.Address
+	tcc                   *BalancerClientConn // the CC that owns this SubConn
+	id                    string
+	ConnectCh             chan struct{}
+	stateListener         func(balancer.SubConnState)
+	connectCalled         *grpcsync.Event
+	Addresses             []resolver.Address
+	HealthUpdateDelivered *grpcsync.Event
 }
 
 // NewTestSubConn returns a newly initialized SubConn. Typically, subconns
@@ -46,9 +47,10 @@ type TestSubConn struct {
 // for some tests.
 func NewTestSubConn(id string) *TestSubConn {
 	return &TestSubConn{
-		ConnectCh:     make(chan struct{}, 1),
-		connectCalled: grpcsync.NewEvent(),
-		id:            id,
+		ConnectCh:             make(chan struct{}, 1),
+		connectCalled:         grpcsync.NewEvent(),
+		HealthUpdateDelivered: grpcsync.NewEvent(),
+		id:                    id,
 	}
 }
 
@@ -93,8 +95,15 @@ func (tsc *TestSubConn) String() string {
 	return tsc.id
 }
 
-// RegisterHealthListener is a no-op.
-func (*TestSubConn) RegisterHealthListener(func(balancer.SubConnState)) {}
+// RegisterHealthListener sends a READY update to mock a situation where no
+// health checking mechanisms are configured.
+func (tsc *TestSubConn) RegisterHealthListener(lis func(balancer.SubConnState)) {
+	// Call the listener in a separate goroutine to avoid deadlocks.
+	go func() {
+		lis(balancer.SubConnState{ConnectivityState: connectivity.Ready})
+		tsc.HealthUpdateDelivered.Fire()
+	}()
+}
 
 // BalancerClientConn is a mock balancer.ClientConn used in tests.
 type BalancerClientConn struct {
@@ -131,12 +140,13 @@ func NewBalancerClientConn(t *testing.T) *BalancerClientConn {
 
 // NewSubConn creates a new SubConn.
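Illustrative sketch (not part of the patch): with the TestSubConn change above, every transition to READY is followed by an asynchronous mock health update, which is why the updated tests wait on HealthUpdateDelivered before reading the next picker. The helper name below is hypothetical; it assumes the google.golang.org/grpc/balancer, google.golang.org/grpc/connectivity and google.golang.org/grpc/internal/testutils imports.

	// becomeReady drives a test SubConn to READY and blocks until the mocked
	// health update from RegisterHealthListener has been delivered, mirroring
	// the pattern the updated tests follow before asserting on pickers.
	func becomeReady(sc *testutils.TestSubConn) {
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
		sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
		<-sc.HealthUpdateDelivered.Done()
	}
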
func (tcc *BalancerClientConn) NewSubConn(a []resolver.Address, o balancer.NewSubConnOptions) (balancer.SubConn, error) {
 	sc := &TestSubConn{
-		tcc:           tcc,
-		id:            fmt.Sprintf("sc%d", tcc.subConnIdx),
-		ConnectCh:     make(chan struct{}, 1),
-		stateListener: o.StateListener,
-		connectCalled: grpcsync.NewEvent(),
-		Addresses:     a,
+		tcc:                   tcc,
+		id:                    fmt.Sprintf("sc%d", tcc.subConnIdx),
+		ConnectCh:             make(chan struct{}, 1),
+		stateListener:         o.StateListener,
+		connectCalled:         grpcsync.NewEvent(),
+		HealthUpdateDelivered: grpcsync.NewEvent(),
+		Addresses:             a,
 	}
 	tcc.subConnIdx++
 	tcc.logger.Logf("testClientConn: NewSubConn(%v, %+v) => %s", a, o, sc)
diff --git a/internal/testutils/wrr.go b/internal/testutils/wrr.go
index 5dcc7a8d37a4..9e0290b00a98 100644
--- a/internal/testutils/wrr.go
+++ b/internal/testutils/wrr.go
@@ -58,6 +58,10 @@ func (twrr *testWRR) Add(item any, weight int64) {
 
 func (twrr *testWRR) Next() any {
 	twrr.mu.Lock()
+	if len(twrr.itemsWithWeight) == 0 {
+		twrr.mu.Unlock()
+		return nil
+	}
 	iww := twrr.itemsWithWeight[twrr.idx]
 	twrr.count++
 	if twrr.count >= iww.weight {
diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go
index 815e244946fe..bb69d576bdfb 100644
--- a/test/roundrobin_test.go
+++ b/test/roundrobin_test.go
@@ -51,6 +51,7 @@ func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOpt
 
 	const backendCount = 5
 	backends := make([]*stubserver.StubServer, backendCount)
+	endpoints := make([]resolver.Endpoint, backendCount)
 	addrs := make([]resolver.Address, backendCount)
 	for i := 0; i < backendCount; i++ {
 		backend := &stubserver.StubServer{
@@ -64,6 +65,7 @@ func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOpt
 
 		backends[i] = backend
 		addrs[i] = resolver.Address{Addr: backend.Address}
+		endpoints[i] = resolver.Endpoint{Addresses: []resolver.Address{addrs[i]}}
 	}
 
 	dopts := []grpc.DialOption{
@@ -87,7 +89,7 @@ func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOpt
 		t.Fatalf("EmptyCall() = %s, want %s", status.Code(err), codes.DeadlineExceeded)
 	}
 
-	r.UpdateState(resolver.State{Addresses: addrs})
+	r.UpdateState(resolver.State{Endpoints: endpoints})
 	if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil {
 		t.Fatal(err)
 	}
@@ -115,10 +117,10 @@ func (s) TestRoundRobin_AddressesRemoved(t *testing.T) {
 
 	// Send a resolver update with no addresses. This should push the channel into
 	// TransientFailure.
-	r.UpdateState(resolver.State{Addresses: []resolver.Address{}})
+	r.UpdateState(resolver.State{Endpoints: []resolver.Endpoint{}})
 	testutils.AwaitState(ctx, t, cc, connectivity.TransientFailure)
 
-	const msgWant = "produced zero addresses"
+	const msgWant = "no children to pick from"
 	client := testgrpc.NewTestServiceClient(cc)
 	if _, err := client.EmptyCall(ctx, &testpb.Empty{}); !strings.Contains(status.Convert(err).Message(), msgWant) {
 		t.Fatalf("EmptyCall() = %v, want Contains(Message(), %q)", err, msgWant)
@@ -137,7 +139,7 @@ func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) {
 
 	// Send a resolver update with no addresses. This should push the channel into
 	// TransientFailure.
-	r.UpdateState(resolver.State{Addresses: []resolver.Address{}})
+	r.UpdateState(resolver.State{Endpoints: []resolver.Endpoint{}})
 	testutils.AwaitState(ctx, t, cc, connectivity.TransientFailure)
 
 	client := testgrpc.NewTestServiceClient(cc)
@@ -173,7 +175,9 @@ func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) {
 
 	// Send a resolver update with a valid backend to push the channel to Ready
 	// and unblock the above RPC.
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backends[0].Address}}}) + r.UpdateState(resolver.State{Endpoints: []resolver.Endpoint{ + {Addresses: []resolver.Address{{Addr: backends[0].Address}}}, + }}) select { case <-ctx.Done(): @@ -268,7 +272,9 @@ func (s) TestRoundRobin_UpdateAddressAttributes(t *testing.T) { } // Set an initial resolver update with no address attributes. addr := resolver.Address{Addr: backend.Address} - r.InitialState(resolver.State{Addresses: []resolver.Address{addr}}) + r.InitialState(resolver.State{Endpoints: []resolver.Endpoint{ + {Addresses: []resolver.Address{addr}}, + }}) cc, err := grpc.NewClient(r.Scheme()+":///test.server", dopts...) if err != nil { t.Fatalf("grpc.NewClient() failed: %v", err) @@ -293,7 +299,9 @@ func (s) TestRoundRobin_UpdateAddressAttributes(t *testing.T) { // Send a resolver update with address attributes. addrWithAttributes := imetadata.Set(addr, metadata.Pairs(testMDKey, testMDValue)) - r.UpdateState(resolver.State{Addresses: []resolver.Address{addrWithAttributes}}) + r.UpdateState(resolver.State{Endpoints: []resolver.Endpoint{ + {Addresses: []resolver.Address{addrWithAttributes}}, + }}) // Make an RPC and ensure it contains the metadata we are looking for. The // resolver update isn't processed synchronously, so we wait some time before diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index f1e99fab9bf9..7f7814f7e3b3 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/stats" "google.golang.org/grpc/internal/xds" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/resolver" @@ -62,8 +63,8 @@ const ( ) var ( - testBackendAddrs = []resolver.Address{{Addr: "1.1.1.1:1"}} - cmpOpts = cmp.Options{ + testBackendEndpoints = []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: "1.1.1.1:1"}}}} + cmpOpts = cmp.Options{ cmpopts.EquateEmpty(), cmpopts.IgnoreFields(load.Data{}, "ReportInterval"), } @@ -93,7 +94,9 @@ func (s) TestDropByCategory(t *testing.T) { builder := balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer b.Close() const ( @@ -109,7 +112,7 @@ func (s) TestDropByCategory(t *testing.T) { t.Fatalf("Failed to create LRS server config for testing: %v", err) } if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -205,7 +208,7 @@ func (s) TestDropByCategory(t *testing.T) { dropDenominator2 = 4 ) if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -272,7 +275,9 @@ func (s) TestDropCircuitBreaking(t *testing.T) { builder := 
balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer b.Close() var maxRequest uint32 = 50 @@ -284,7 +289,7 @@ func (s) TestDropCircuitBreaking(t *testing.T) { t.Fatalf("Failed to create LRS server config for testing: %v", err) } if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -397,7 +402,9 @@ func (s) TestPickerUpdateAfterClose(t *testing.T) { builder := balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) // Create a stub balancer which waits for the cluster_impl policy to be // closed before sending a picker update (upon receipt of a subConn state @@ -430,7 +437,7 @@ func (s) TestPickerUpdateAfterClose(t *testing.T) { var maxRequest uint32 = 50 if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -470,11 +477,13 @@ func (s) TestClusterNameInAddressAttributes(t *testing.T) { builder := balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer b.Close() if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -494,7 +503,7 @@ func (s) TestClusterNameInAddressAttributes(t *testing.T) { } addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testBackendAddrs[0].Addr; got != want { + if got, want := addrs1[0].Addr, testBackendEndpoints[0].Addresses[0].Addr; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) } cn, ok := xds.GetXDSHandshakeClusterName(addrs1[0].Attributes) @@ -511,7 +520,7 @@ func (s) TestClusterNameInAddressAttributes(t *testing.T) { const testClusterName2 = "test-cluster-2" var addr2 = resolver.Address{Addr: "2.2.2.2"} if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: []resolver.Address{addr2}}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{addr2}}}}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName2, EDSServiceName: testServiceName, @@ -545,11 +554,13 @@ func (s) TestReResolution(t *testing.T) { builder := balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer b.Close() if err := b.UpdateClientConnState(balancer.ClientConnState{ - 
ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -568,7 +579,10 @@ func (s) TestReResolution(t *testing.T) { t.Fatal(err.Error()) } - sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + sc1.UpdateState(balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: errors.New("test error"), + }) // This should get the transient failure picker. if err := cc.WaitForErrPicker(ctx); err != nil { t.Fatal(err.Error()) @@ -582,6 +596,7 @@ func (s) TestReResolution(t *testing.T) { } sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc1.HealthUpdateDelivered.Done() // Test pick with one backend. if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { t.Fatal(err.Error()) @@ -612,12 +627,17 @@ func (s) TestLoadReporting(t *testing.T) { builder := balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer b.Close() - addrs := make([]resolver.Address, len(testBackendAddrs)) - for i, a := range testBackendAddrs { - addrs[i] = xdsinternal.SetLocalityID(a, testLocality) + endpoints := make([]resolver.Endpoint, len(testBackendEndpoints)) + for i, e := range testBackendEndpoints { + endpoints[i] = xdsinternal.SetLocalityIDInEndpoint(e, testLocality) + for j, a := range e.Addresses { + endpoints[i].Addresses[j] = xdsinternal.SetLocalityID(a, testLocality) + } } testLRSServerConfig, err := bootstrap.ServerConfigForTesting(bootstrap.ServerConfigTestingOptions{ URI: "trafficdirector.googleapis.com:443", @@ -627,7 +647,7 @@ func (s) TestLoadReporting(t *testing.T) { t.Fatalf("Failed to create LRS server config for testing: %v", err) } if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: endpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -661,8 +681,9 @@ func (s) TestLoadReporting(t *testing.T) { scs := balancer.SubConnState{ConnectivityState: connectivity.Ready} sca := internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) - sca(&scs, addrs[0]) + sca(&scs, endpoints[0].Addresses[0]) sc1.UpdateState(scs) + <-sc1.HealthUpdateDelivered.Done() // Test pick with one backend. 
const successCount = 5 const errorCount = 5 @@ -745,12 +766,14 @@ func (s) TestUpdateLRSServer(t *testing.T) { builder := balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer b.Close() - addrs := make([]resolver.Address, len(testBackendAddrs)) - for i, a := range testBackendAddrs { - addrs[i] = xdsinternal.SetLocalityID(a, testLocality) + endpoints := make([]resolver.Endpoint, len(testBackendEndpoints)) + for i, e := range testBackendEndpoints { + endpoints[i] = xdsinternal.SetLocalityIDInEndpoint(e, testLocality) } testLRSServerConfig, err := bootstrap.ServerConfigForTesting(bootstrap.ServerConfigTestingOptions{ URI: "trafficdirector.googleapis.com:443", @@ -760,7 +783,7 @@ func (s) TestUpdateLRSServer(t *testing.T) { t.Fatalf("Failed to create LRS server config for testing: %v", err) } if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: endpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -794,7 +817,7 @@ func (s) TestUpdateLRSServer(t *testing.T) { // Update LRS server to a different name. if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: endpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -819,7 +842,7 @@ func (s) TestUpdateLRSServer(t *testing.T) { // Update LRS server to nil, to disable LRS. 
if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: endpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, @@ -848,7 +871,9 @@ func (s) TestChildPolicyUpdatedOnConfigUpdate(t *testing.T) { builder := balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer b.Close() // Keep track of which child policy was updated @@ -876,7 +901,7 @@ func (s) TestChildPolicyUpdatedOnConfigUpdate(t *testing.T) { // Initial config update with childPolicyName1 if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, ChildPolicy: &internalserviceconfig.BalancerConfig{ @@ -893,7 +918,7 @@ func (s) TestChildPolicyUpdatedOnConfigUpdate(t *testing.T) { // Second config update with childPolicyName2 if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, ChildPolicy: &internalserviceconfig.BalancerConfig{ @@ -916,7 +941,9 @@ func (s) TestFailedToParseChildPolicyConfig(t *testing.T) { builder := balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer b.Close() // Create a stub balancer which fails to ParseConfig. 
@@ -929,7 +956,7 @@ func (s) TestFailedToParseChildPolicyConfig(t *testing.T) { }) err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, ChildPolicy: &internalserviceconfig.BalancerConfig{ @@ -976,7 +1003,9 @@ func (s) TestPickerUpdatedSynchronouslyOnConfigUpdate(t *testing.T) { builder := balancer.Get(Name) cc := testutils.NewBalancerClientConn(t) - b := builder.Build(cc, balancer.BuildOptions{}) + b := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer b.Close() // Create a stub balancer which waits for the cluster_impl policy to be @@ -992,7 +1021,7 @@ func (s) TestPickerUpdatedSynchronouslyOnConfigUpdate(t *testing.T) { }) if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + ResolverState: xdsclient.SetClient(resolver.State{Endpoints: testBackendEndpoints}, xdsC), BalancerConfig: &LBConfig{ Cluster: testClusterName, EDSServiceName: testServiceName, diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index 6ef8738dfcf4..429907bc356c 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -78,7 +78,9 @@ func TestClusterPicks(t *testing.T) { cc := testutils.NewBalancerClientConn(t) builder := balancer.Get(balancerName) parser := builder.(balancer.ConfigParser) - bal := builder.Build(cc, balancer.BuildOptions{}) + bal := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) configJSON1 := `{ "children": { @@ -97,9 +99,9 @@ func TestClusterPicks(t *testing.T) { {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := bal.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), - hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}), }}, BalancerConfig: config1, }); err != nil { @@ -120,6 +122,7 @@ func TestClusterPicks(t *testing.T) { m1[addrs[0]] = sc sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc.HealthUpdateDelivered.Done() } p1 := <-cc.NewPickerCh @@ -157,7 +160,9 @@ func TestConfigUpdateAddCluster(t *testing.T) { cc := testutils.NewBalancerClientConn(t) builder := balancer.Get(balancerName) parser := builder.(balancer.ConfigParser) - bal := builder.Build(cc, balancer.BuildOptions{}) + bal := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) configJSON1 := `{ "children": { @@ -176,9 +181,9 @@ func TestConfigUpdateAddCluster(t *testing.T) { {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := bal.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - 
hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), - hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}), }}, BalancerConfig: config1, }); err != nil { @@ -199,6 +204,7 @@ func TestConfigUpdateAddCluster(t *testing.T) { m1[addrs[0]] = sc sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc.HealthUpdateDelivered.Done() } p1 := <-cc.NewPickerCh @@ -244,10 +250,10 @@ func TestConfigUpdateAddCluster(t *testing.T) { } wantAddrs = append(wantAddrs, resolver.Address{Addr: testBackendAddrStrs[2], BalancerAttributes: nil}) if err := bal.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), - hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}), - hierarchy.Set(wantAddrs[2], []string{"cds:cluster_3"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[2]}}, []string{"cds:cluster_3"}), }}, BalancerConfig: config2, }); err != nil { @@ -315,7 +321,9 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { cc := testutils.NewBalancerClientConn(t) builder := balancer.Get(balancerName) parser := builder.(balancer.ConfigParser) - bal := builder.Build(cc, balancer.BuildOptions{}) + bal := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) configJSON1 := `{ "children": { @@ -334,9 +342,9 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := bal.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), - hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}), }}, BalancerConfig: config1, }); err != nil { @@ -357,6 +365,7 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { m1[addrs[0]] = sc sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc.HealthUpdateDelivered.Done() } p1 := <-cc.NewPickerCh @@ -418,9 +427,9 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { // Resend the previous config with clusters if err := bal.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), - hierarchy.Set(wantAddrs[1], []string{"cds:cluster_2"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + 
hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"cds:cluster_2"}), }}, BalancerConfig: config1, }); err != nil { @@ -441,6 +450,7 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { m2[addrs[0]] = sc sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc.HealthUpdateDelivered.Done() } p3 := <-cc.NewPickerCh @@ -484,6 +494,7 @@ func TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) { bOpts := balancer.BuildOptions{ DialCreds: insecure.NewCredentials(), CustomUserAgent: userAgent, + MetricsRecorder: &stats.NoopMetricsRecorder{}, } stub.Register(t.Name(), stub.BalancerFuncs{ UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { @@ -560,7 +571,9 @@ func TestInitialIdle(t *testing.T) { cc := testutils.NewBalancerClientConn(t) builder := balancer.Get(balancerName) parser := builder.(balancer.ConfigParser) - bal := builder.Build(cc, balancer.BuildOptions{}) + bal := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) configJSON1 := `{ "children": { @@ -577,8 +590,8 @@ func TestInitialIdle(t *testing.T) { {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, } if err := bal.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}), }}, BalancerConfig: config1, }); err != nil { @@ -607,7 +620,9 @@ func TestClusterGracefulSwitch(t *testing.T) { cc := testutils.NewBalancerClientConn(t) builder := balancer.Get(balancerName) parser := builder.(balancer.ConfigParser) - bal := builder.Build(cc, balancer.BuildOptions{}) + bal := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer bal.Close() configJSON1 := `{ @@ -624,8 +639,8 @@ func TestClusterGracefulSwitch(t *testing.T) { {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := bal.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddrs[0], []string{"csp:cluster"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"csp:cluster"}), }}, BalancerConfig: config1, }); err != nil { @@ -635,6 +650,7 @@ func TestClusterGracefulSwitch(t *testing.T) { sc1 := <-cc.NewSubConnCh sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-sc1.HealthUpdateDelivered.Done() p1 := <-cc.NewPickerCh pi := balancer.PickInfo{ Ctx: SetPickedCluster(context.Background(), "csp:cluster"), @@ -666,8 +682,8 @@ func TestClusterGracefulSwitch(t *testing.T) { t.Fatalf("failed to parse balancer config: %v", err) } if err := bal.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddrs[1], []string{"csp:cluster"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + 
hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[1]}}, []string{"csp:cluster"}), }}, BalancerConfig: config2, }); err != nil { @@ -735,7 +751,9 @@ func (s) TestUpdateStatePauses(t *testing.T) { builder := balancer.Get(balancerName) parser := builder.(balancer.ConfigParser) - bal := builder.Build(cc, balancer.BuildOptions{}) + bal := builder.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer bal.Close() configJSON1 := `{ @@ -753,8 +771,8 @@ func (s) TestUpdateStatePauses(t *testing.T) { {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, } if err := bal.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), + ResolverState: resolver.State{Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{wantAddrs[0]}}, []string{"cds:cluster_1"}), }}, BalancerConfig: config1, }); err != nil { diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go index 89760f6fd23e..82f2549e877c 100644 --- a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -1135,8 +1135,8 @@ func waitForProducedZeroAddressesError(ctx context.Context, t *testing.T, client t.Logf("EmptyCall() returned code: %v, want: %v", code, codes.Unavailable) continue } - if !strings.Contains(err.Error(), "produced zero addresses") { - t.Logf("EmptyCall() = %v, want %v", err, "produced zero addresses") + if !strings.Contains(err.Error(), "no children to pick from") { + t.Logf("EmptyCall() = %v, want %v", err, "no children to pick from") continue } return nil diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 79740dc24f71..0c4d6c259a88 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -20,6 +20,7 @@ package priority import ( "context" + "errors" "fmt" "testing" "time" @@ -32,6 +33,7 @@ import ( "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/stats" "google.golang.org/grpc/resolver" ) @@ -83,15 +85,17 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two children, with priorities [0, 1], each with one backend. 
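Illustrative sketch (not part of the patch): these tests now attach the hierarchy path to the whole endpoint with hierarchy.SetInEndpoint instead of to each address with hierarchy.Set. A hypothetical helper showing the new shape of the resolver state:

	// endpointStateFor wraps a single address in an Endpoint and tags the
	// Endpoint, rather than the Address, with the given hierarchy path.
	func endpointStateFor(addr, path string) resolver.State {
		ep := resolver.Endpoint{Addresses: []resolver.Address{{Addr: addr}}}
		ep = hierarchy.SetInEndpoint(ep, []string{path})
		return resolver.State{Endpoints: []resolver.Endpoint{ep}}
	}
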
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -123,10 +127,10 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { // Add p2, it shouldn't cause any updates. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-2"}), }, }, BalancerConfig: &LBConfig{ @@ -157,9 +161,9 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { // Remove p2, no updates. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -197,15 +201,17 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() t.Log("Two localities, with priorities [0, 1], each with one backend.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -259,10 +265,10 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { t.Log("Add p2, it shouldn't cause any updates.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: 
[]resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-2"}), }, }, BalancerConfig: &LBConfig{ @@ -286,7 +292,10 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { } t.Log("Turn down 1, use 2.") - sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + sc1.UpdateState(balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: errors.New("test error"), + }) // Before 2 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. @@ -310,9 +319,9 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { t.Log("Remove 2, use 1.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -360,15 +369,17 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two localities, with priorities [0, 1], each with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -444,15 +455,17 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two localities, with different priorities, each with one backend. 
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -499,10 +512,10 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { t.Log("Add p2, it should create a new SubConn.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-2"}), }, }, BalancerConfig: &LBConfig{ @@ -545,16 +558,18 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Three localities, with priorities [0,1,2], each with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-2"}), }, }, BalancerConfig: &LBConfig{ @@ -617,7 +632,14 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { // // With localities caching, the lower priorities are closed after a timeout, // in goroutines. The order is no longer guaranteed. - scToShutdown := []balancer.SubConn{<-cc.ShutdownSubConnCh, <-cc.ShutdownSubConnCh} + firstSC := <-cc.ShutdownSubConnCh + secondSC := <-cc.ShutdownSubConnCh + // The same SubConn is closed by balancergroup, gracefulswitch and + // pickfirstleaf when they are closed. Remove duplicate events. 
+ for secondSC == firstSC { + secondSC = <-cc.ShutdownSubConnCh + } + scToShutdown := []balancer.SubConn{firstSC, secondSC} if !(scToShutdown[0] == sc1 && scToShutdown[1] == sc2) && !(scToShutdown[0] == sc2 && scToShutdown[1] == sc1) { t.Errorf("ShutdownSubConn, want [%v, %v], got %v", sc1, sc2, scToShutdown) } @@ -647,15 +669,17 @@ func (s) TestPriority_InitTimeout(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two localities, with different priorities, each with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -718,15 +742,17 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two localities, with different priorities, each with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -780,9 +806,9 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { // Re-add two localities, with previous priorities, but different backends. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[3]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[3]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -823,8 +849,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { // Remove p1, to fallback to p0. 
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -839,9 +865,18 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { // p1 subconn should be shut down. scToShutdown1 := <-cc.ShutdownSubConnCh + // The same SubConn is closed by balancergroup, gracefulswitch and + // pickfirstleaf when they are closed. Remove duplicate events. + for scToShutdown1 == scToShutdown { + scToShutdown1 = <-cc.ShutdownSubConnCh + } if scToShutdown1 != sc11 { t.Fatalf("ShutdownSubConn, want %v, got %v", sc11, scToShutdown1) } + // The same SubConn is closed by balancergroup, gracefulswitch and + // pickfirstleaf when they are closed. Remove duplicate events. + <-cc.ShutdownSubConnCh + <-cc.ShutdownSubConnCh // Test pick return NoSubConn. if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { @@ -877,15 +912,17 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two localities, with priorities [0, 1], each with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -917,8 +954,8 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { // Remove addresses from priority 0, should use p1. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -969,14 +1006,16 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // One localities, with priorities [0], each with one backend. 
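Illustrative sketch (not part of the patch): because the same SubConn's shutdown is now reported once each by balancergroup, gracefulswitch and pickfirstleaf, tests that only care about which SubConns were shut down can drain the duplicates, roughly as below. The helper name is hypothetical, and a real test would select against a context timeout as the surrounding code does.

	// distinctShutdowns reads from ShutdownSubConnCh until the wanted number
	// of distinct SubConns has been observed, ignoring duplicate notifications
	// for the same SubConn.
	func distinctShutdowns(cc *testutils.BalancerClientConn, want int) map[balancer.SubConn]bool {
		seen := make(map[balancer.SubConn]bool)
		for len(seen) < want {
			seen[<-cc.ShutdownSubConnCh] = true
		}
		return seen
	}
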
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1015,15 +1054,17 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two children, with priorities [0, 1], each with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -1056,9 +1097,9 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { // higher priority, and be used. The old SubConn should be closed. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -1112,15 +1153,17 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two children, with priorities [0, 1], each with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -1165,9 +1208,9 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { // higher priority, and be used. The old SubConn should be closed. 
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -1183,6 +1226,10 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { // Old subconn from child-0 should be removed. scToShutdown := <-cc.ShutdownSubConnCh + // The same SubConn is closed by balancergroup, gracefulswitch and + // pickfirstleaf when they are closed. Remove duplicate events. + <-cc.ShutdownSubConnCh + <-cc.ShutdownSubConnCh if scToShutdown != sc0 { t.Fatalf("ShutdownSubConn, want %v, got %v", sc0, scToShutdown) } @@ -1208,15 +1255,17 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two children, with priorities [0, 1], each with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -1237,7 +1286,10 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { sc0 := <-cc.NewSubConnCh // p0 is down. - sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + sc0.UpdateState(balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: errors.New("test error"), + }) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { @@ -1260,8 +1312,8 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { // Remove child with p1, the child at higher priority should now be used. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1312,14 +1364,16 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // One children, with priorities [0], with one backend. 
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1372,8 +1426,8 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { // Re-add the child, shouldn't create new connections. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1411,14 +1465,16 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // One children, with priorities [0], with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1450,8 +1506,8 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { // name). if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1510,14 +1566,16 @@ func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // One children, with priorities [0], with one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1553,15 +1611,17 @@ func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // One children, with priorities [0], with one backend, reresolution is // ignored. 
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1600,8 +1660,8 @@ func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { // Send another update to set IgnoreReresolutionRequests to false. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1652,16 +1712,18 @@ func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // One children, with priorities [0, 1], each with one backend. // Reresolution is ignored for p0. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -1748,7 +1810,7 @@ func init() { }) } - sc, err := bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{StateListener: lis}) + sc, err := bd.ClientConn.NewSubConn(opts.ResolverState.Endpoints[0].Addresses, balancer.NewSubConnOptions{StateListener: lis}) if err != nil { return err } @@ -1774,15 +1836,17 @@ func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // Two children, with priorities [0, 1], each with one backend. 
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -1840,14 +1904,16 @@ func (s) TestPriority_AddLowPriorityWhenHighIsInIdle(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() // One child, with priorities [0], one backend. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), }, }, BalancerConfig: &LBConfig{ @@ -1875,9 +1941,9 @@ func (s) TestPriority_AddLowPriorityWhenHighIsInIdle(t *testing.T) { // Add 1, should keep using 0. if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -1921,15 +1987,17 @@ func (s) TestPriority_HighPriorityUpdatesWhenLowInUse(t *testing.T) { cc := testutils.NewBalancerClientConn(t) bb := balancer.Get(Name) - pb := bb.Build(cc, balancer.BuildOptions{}) + pb := bb.Build(cc, balancer.BuildOptions{ + MetricsRecorder: &stats.NoopMetricsRecorder{}, + }) defer pb.Close() t.Log("Two localities, with priorities [0, 1], each with one backend.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ @@ -1985,9 +2053,9 @@ func (s) TestPriority_HighPriorityUpdatesWhenLowInUse(t *testing.T) { t.Log("Change p0 to use new address.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ - Addresses: []resolver.Address{ - hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-0"}), - hierarchy.Set(resolver.Address{Addr: 
testBackendAddrStrs[3]}, []string{"child-1"}), + Endpoints: []resolver.Endpoint{ + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}}, []string{"child-0"}), + hierarchy.SetInEndpoint(resolver.Endpoint{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[3]}}}, []string{"child-1"}), }, }, BalancerConfig: &LBConfig{ diff --git a/xds/internal/balancer/ringhash/ringhash_test.go b/xds/internal/balancer/ringhash/ringhash_test.go index f6778d832f8c..6dc005640a10 100644 --- a/xds/internal/balancer/ringhash/ringhash_test.go +++ b/xds/internal/balancer/ringhash/ringhash_test.go @@ -41,6 +41,7 @@ var ( cmp.AllowUnexported(testutils.TestSubConn{}, ringEntry{}, subConn{}), cmpopts.IgnoreFields(subConn{}, "mu"), cmpopts.IgnoreFields(testutils.TestSubConn{}, "connectCalled"), + cmpopts.IgnoreFields(testutils.TestSubConn{}, "HealthUpdateDelivered"), } ) From bc738b7b44b8063b71308a86ffe7a7a7646f1ffb Mon Sep 17 00:00:00 2001 From: Arjan Bal Date: Mon, 13 Jan 2025 15:14:52 +0530 Subject: [PATCH 2/4] Revert e2e style tests --- test/roundrobin_test.go | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go index bb69d576bdfb..7fa92aaaedc1 100644 --- a/test/roundrobin_test.go +++ b/test/roundrobin_test.go @@ -89,7 +89,7 @@ func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOpt t.Fatalf("EmptyCall() = %s, want %s", status.Code(err), codes.DeadlineExceeded) } - r.UpdateState(resolver.State{Endpoints: endpoints}) + r.UpdateState(resolver.State{Addresses: addrs}) if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil { t.Fatal(err) } @@ -139,7 +139,7 @@ func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) { // Send a resolver update with no addresses. This should push the channel into // TransientFailure. - r.UpdateState(resolver.State{Endpoints: []resolver.Endpoint{}}) + r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) testutils.AwaitState(ctx, t, cc, connectivity.TransientFailure) client := testgrpc.NewTestServiceClient(cc) @@ -175,9 +175,7 @@ func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) { // Send a resolver update with a valid backend to push the channel to Ready // and unblock the above RPC. - r.UpdateState(resolver.State{Endpoints: []resolver.Endpoint{ - {Addresses: []resolver.Address{{Addr: backends[0].Address}}}, - }}) + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backends[0].Address}}}) select { case <-ctx.Done(): @@ -272,9 +270,7 @@ func (s) TestRoundRobin_UpdateAddressAttributes(t *testing.T) { } // Set an initial resolver update with no address attributes. addr := resolver.Address{Addr: backend.Address} - r.InitialState(resolver.State{Endpoints: []resolver.Endpoint{ - {Addresses: []resolver.Address{addr}}, - }}) + r.InitialState(resolver.State{Addresses: []resolver.Address{addr}}) cc, err := grpc.NewClient(r.Scheme()+":///test.server", dopts...) if err != nil { t.Fatalf("grpc.NewClient() failed: %v", err) @@ -299,9 +295,7 @@ func (s) TestRoundRobin_UpdateAddressAttributes(t *testing.T) { // Send a resolver update with address attributes. 
addrWithAttributes := imetadata.Set(addr, metadata.Pairs(testMDKey, testMDValue)) - r.UpdateState(resolver.State{Endpoints: []resolver.Endpoint{ - {Addresses: []resolver.Address{addrWithAttributes}}, - }}) + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrWithAttributes}}) // Make an RPC and ensure it contains the metadata we are looking for. The // resolver update isn't processed synchronously, so we wait some time before From da80e2e5504bbc6678824335805e283fb37a82ed Mon Sep 17 00:00:00 2001 From: Arjan Bal Date: Mon, 13 Jan 2025 17:35:16 +0530 Subject: [PATCH 3/4] Address review comments --- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 3 +- balancer/roundrobin/roundrobin.go | 28 +++---------------- .../weightedtarget/weightedtarget_test.go | 4 +++ .../balancer/priority/balancer_test.go | 4 +++ 4 files changed, 14 insertions(+), 25 deletions(-) diff --git a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 1818ad065041..76fa5fea95f2 100644 --- a/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -618,7 +618,6 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // Record a connection attempt when exiting CONNECTING. if newState.ConnectivityState == connectivity.TransientFailure { sd.connectionFailedInFirstPass = true - sd.lastErr = newState.ConnectionError connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) } @@ -703,6 +702,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub }) } case connectivity.TransientFailure: + sd.lastErr = newState.ConnectionError sd.effectiveState = connectivity.TransientFailure // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from @@ -728,6 +728,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub switch newState.ConnectivityState { case connectivity.TransientFailure: b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError if b.numTF%b.subConns.Len() == 0 { b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, diff --git a/balancer/roundrobin/roundrobin.go b/balancer/roundrobin/roundrobin.go index 2ee840ceff15..bc787dce8c31 100644 --- a/balancer/roundrobin/roundrobin.go +++ b/balancer/roundrobin/roundrobin.go @@ -59,8 +59,8 @@ func (bb builder) Name() string { func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { bal := &rrBalancer{ - cc: cc, - child: endpointsharding.NewBalancer(cc, opts), + cc: cc, + Balancer: endpointsharding.NewBalancer(cc, opts), } bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal)) bal.logger.Infof("Created") @@ -68,35 +68,15 @@ func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) bala } type rrBalancer struct { + balancer.Balancer cc balancer.ClientConn - child balancer.Balancer logger *internalgrpclog.PrefixLogger } -func (b *rrBalancer) Close() { - b.child.Close() -} - -func (b *rrBalancer) ExitIdle() { - // Should always be ok, as child is endpoint sharding. - if ei, ok := b.child.(balancer.ExitIdler); ok { - ei.ExitIdle() - } -} - -func (b *rrBalancer) ResolverError(err error) { - // Will cause inline picker update from endpoint sharding. 
- b.child.ResolverError(err) -} - func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { // Enable the health listener in pickfirst children for client side health // checks and outlier detection, if configured. ccs.ResolverState = pickfirstleaf.EnableHealthListener(ccs.ResolverState) ccs.BalancerConfig = endpointShardingLBConfig - return b.child.UpdateClientConnState(ccs) -} - -func (b *rrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) + return b.Balancer.UpdateClientConnState(ccs) } diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go index 3edabbefe504..a5f152836c45 100644 --- a/balancer/weightedtarget/weightedtarget_test.go +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -827,6 +827,7 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) <-cc.NewPickerCh + <-sc3.ConnectCh sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) p := <-cc.NewPickerCh @@ -877,6 +878,9 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { } // Move balancer 3 into transient failure. + sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle}) + <-sc3.ConnectCh + sc3.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) wantSubConnErr := errors.New("subConn connection error") sc3.UpdateState(balancer.SubConnState{ ConnectivityState: connectivity.TransientFailure, diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index 0c4d6c259a88..dd279f90acc1 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -254,6 +254,7 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh + <-sc1.ConnectCh sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -292,6 +293,9 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { } t.Log("Turn down 1, use 2.") + sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle}) + <-sc1.ConnectCh + sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting}) sc1.UpdateState(balancer.SubConnState{ ConnectivityState: connectivity.TransientFailure, ConnectionError: errors.New("test error"), From b4a99448185ca9c440b4d2c3ca4b1d1d9e789362 Mon Sep 17 00:00:00 2001 From: Arjan Bal Date: Mon, 13 Jan 2025 17:41:28 +0530 Subject: [PATCH 4/4] Add todo comments --- balancer/weightedtarget/weightedtarget_test.go | 6 ++++++ internal/balancergroup/balancergroup_test.go | 9 +++++++++ xds/internal/balancer/priority/balancer_test.go | 12 ++++++++++++ 3 files changed, 27 insertions(+) diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go index a5f152836c45..ef7c45519248 100644 --- a/balancer/weightedtarget/weightedtarget_test.go +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -297,6 +297,9 @@ func (s) TestWeightedTarget(t *testing.T) { // 
The same SubConn is closed by balancergroup, gracefulswitch and // pickfirstleaf when they are closed. Remove duplicate events. + // TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this + // workaround once pickfirst is the only leaf policy and responsible for + // shutting down SubConns. initialSC := scShutdown for scShutdown == initialSC { scShutdown = <-cc.ShutdownSubConnCh @@ -912,6 +915,9 @@ func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { // The same SubConn is closed by balancergroup, gracefulswitch and // pickfirstleaf when they are closed. Remove duplicate events. + // TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this + // workaround once pickfirst is the only leaf policy and responsible for + // shutting down SubConns. initialSC := scShutdown for scShutdown == initialSC { scShutdown = <-cc.ShutdownSubConnCh diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go index 158b5bb3b993..d72b1054acf0 100644 --- a/internal/balancergroup/balancergroup_test.go +++ b/internal/balancergroup/balancergroup_test.go @@ -371,6 +371,9 @@ func (s) TestBalancerGroup_locality_caching_not_read_within_timeout(t *testing.T defer cancel() // The same SubConn is closed by balancergroup, gracefulswitch and // pickfirstleaf when they are closed. + // TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this + // workaround once pickfirst is the only leaf policy and responsible for + // shutting down SubConns. scToShutdown := map[balancer.SubConn]int{ addrToSC[testBackendAddrs[2].Addr]: 3, addrToSC[testBackendAddrs[3].Addr]: 3, @@ -419,6 +422,9 @@ func (s) TestBalancerGroup_locality_caching_read_with_different_builder(t *testi shutdownTimeout := time.After(time.Millisecond * 500) // The same SubConn is closed by balancergroup, gracefulswitch and // pickfirstleaf when they are closed. + // TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this + // workaround once pickfirst is the only leaf policy and responsible for + // shutting down SubConns. scToShutdown := map[balancer.SubConn]int{ addrToSC[testBackendAddrs[2].Addr]: 3, addrToSC[testBackendAddrs[3].Addr]: 3, @@ -696,6 +702,9 @@ func (s) TestBalancerGracefulSwitch(t *testing.T) { defer cancel() // The same SubConn is closed by balancergroup, gracefulswitch and // pickfirstleaf when they are closed. + // TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this + // workaround once pickfirst is the only leaf policy and responsible for + // shutting down SubConns. scToShutdown := map[balancer.SubConn]int{ m1[testBackendAddrs[0].Addr]: 3, m1[testBackendAddrs[1].Addr]: 3, diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index dd279f90acc1..2247ec3cd925 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -640,6 +640,9 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { secondSC := <-cc.ShutdownSubConnCh // The same SubConn is closed by balancergroup, gracefulswitch and // pickfirstleaf when they are closed. Remove duplicate events. + // TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this + // workaround once pickfirst is the only leaf policy and responsible for + // shutting down SubConns. 
for secondSC == firstSC { secondSC = <-cc.ShutdownSubConnCh } @@ -871,6 +874,9 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { scToShutdown1 := <-cc.ShutdownSubConnCh // The same SubConn is closed by balancergroup, gracefulswitch and // pickfirstleaf when they are closed. Remove duplicate events. + // TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this + // workaround once pickfirst is the only leaf policy and responsible for + // shutting down SubConns. for scToShutdown1 == scToShutdown { scToShutdown1 = <-cc.ShutdownSubConnCh } @@ -879,6 +885,9 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { } // The same SubConn is closed by balancergroup, gracefulswitch and // pickfirstleaf when they are closed. Remove duplicate events. + // TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this + // workaround once pickfirst is the only leaf policy and responsible for + // shutting down SubConns. <-cc.ShutdownSubConnCh <-cc.ShutdownSubConnCh @@ -1232,6 +1241,9 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { scToShutdown := <-cc.ShutdownSubConnCh // The same SubConn is closed by balancergroup, gracefulswitch and // pickfirstleaf when they are closed. Remove duplicate events. + // TODO: https://github.com/grpc/grpc-go/issues/6472 - Remove this + // workaround once pickfirst is the only leaf policy and responsible for + // shutting down SubConns. <-cc.ShutdownSubConnCh <-cc.ShutdownSubConnCh if scToShutdown != sc0 {