From d7451605800e25716561879372279d0713a72ed6 Mon Sep 17 00:00:00 2001
From: zyguan
Date: Tue, 16 Jan 2024 18:04:22 +0800
Subject: [PATCH] avoid data race in tests

Signed-off-by: zyguan
---
 internal/locate/region_cache_test.go    | 24 ++++++++++++++++++++----
 internal/locate/region_request3_test.go | 16 +++++++++++++---
 2 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/internal/locate/region_cache_test.go b/internal/locate/region_cache_test.go
index 5550409f38..fbc1e8b160 100644
--- a/internal/locate/region_cache_test.go
+++ b/internal/locate/region_cache_test.go
@@ -85,6 +85,7 @@ type testRegionCacheSuite struct {
 	region1   uint64
 	cache     *RegionCache
 	bo        *retry.Backoffer
+	onClosed  func()
 }
 
 func (s *testRegionCacheSuite) SetupTest() {
@@ -104,6 +105,9 @@ func (s *testRegionCacheSuite) SetupTest() {
 func (s *testRegionCacheSuite) TearDownTest() {
 	s.cache.Close()
 	s.mvccStore.Close()
+	if s.onClosed != nil {
+		s.onClosed()
+	}
 }
 
 func (s *testRegionCacheSuite) storeAddr(id uint64) string {
@@ -306,6 +310,14 @@ func (s *testRegionCacheSuite) TestResolveStateTransition() {
 }
 
 func (s *testRegionCacheSuite) TestReloadRegionWithDownPeers() {
+	s.onClosed = func() {
+		// TODO(zyguan): cache.Close doesn't wait for exit of background goroutines, sleep a while to avoid data race.
+		// This is a quick-and-dirty workaround, we should refine background job management in the future.
+		time.Sleep(time.Second)
+		SetRegionCacheTTLSec(600)
+	}
+	SetRegionCacheTTLSec(2)
+
 	cntGetRegion := 0
 	s.cache.pdClient = &inspectedPDClient{
 		Client: s.cache.pdClient,
@@ -316,8 +328,6 @@
 	}
 
 	s.cluster.MarkPeerDown(s.peer2)
-	defer SetRegionCacheTTLSec(regionCacheTTLSec)
-	SetRegionCacheTTLSec(2)
 	for i := 0; i < 50; i++ {
 		time.Sleep(100 * time.Millisecond)
 		_, err := s.cache.LocateKey(s.bo, []byte("a"))
@@ -327,6 +337,14 @@
 }
 
 func (s *testRegionCacheSuite) TestTiFlashRecoveredFromDown() {
+	s.onClosed = func() {
+		// TODO(zyguan): cache.Close doesn't wait for exit of background goroutines, sleep a while to avoid data race.
+		// This is a quick-and-dirty workaround, we should refine background job management in the future.
+		time.Sleep(time.Second)
+		SetRegionCacheTTLSec(600)
+	}
+	SetRegionCacheTTLSec(3)
+
 	store3 := s.cluster.AllocID()
 	peer3 := s.cluster.AllocID()
 	s.cluster.AddStore(store3, s.storeAddr(store3))
@@ -357,8 +375,6 @@
 	region = s.cache.GetCachedRegionWithRLock(loc.Region)
 	s.Equal(region.hasDownPeers, true)
 
-	defer SetRegionCacheTTLSec(regionCacheTTLSec)
-	SetRegionCacheTTLSec(3)
 	for i := 0; i <= 3; i++ {
 		time.Sleep(1 * time.Second)
 		loc, err = s.cache.LocateKey(s.bo, []byte("a"))
diff --git a/internal/locate/region_request3_test.go b/internal/locate/region_request3_test.go
index 21d45f78f6..24d5e15429 100644
--- a/internal/locate/region_request3_test.go
+++ b/internal/locate/region_request3_test.go
@@ -75,9 +75,11 @@ type testRegionRequestToThreeStoresSuite struct {
 	bo                  *retry.Backoffer
 	regionRequestSender *RegionRequestSender
 	mvccStore           mocktikv.MVCCStore
+	onClosed            func()
 }
 
 func (s *testRegionRequestToThreeStoresSuite) SetupTest() {
+	SetRegionCacheTTLSec(600)
 	s.mvccStore = mocktikv.MustNewMVCCStore()
 	s.cluster = mocktikv.NewCluster(s.mvccStore)
 	s.storeIDs, s.peerIDs, s.regionID, s.leaderPeer = mocktikv.BootstrapWithMultiStores(s.cluster, 3)
@@ -91,6 +93,9 @@ func (s *testRegionRequestToThreeStoresSuite) SetupTest() {
 func (s *testRegionRequestToThreeStoresSuite) TearDownTest() {
 	s.cache.Close()
 	s.mvccStore.Close()
+	if s.onClosed != nil {
+		s.onClosed()
+	}
 }
 
 func (s *testRegionRequestToThreeStoresSuite) TestStoreTokenLimit() {
@@ -1697,7 +1702,12 @@ func (s *testRegionRequestToThreeStoresSuite) TestDoNotTryUnreachableLeader() {
 }
 
 func (s *testRegionRequestToThreeStoresSuite) TestTiKVRecoveredFromDown() {
-	defer SetRegionCacheTTLSec(regionCacheTTLSec)
+	s.onClosed = func() {
+		// TODO(zyguan): cache.Close doesn't wait for exit of background goroutines, sleep a while to avoid data race.
+		// This is a quick-and-dirty workaround, we should refine background job management in the future.
+		time.Sleep(time.Second)
+		SetRegionCacheTTLSec(600)
+	}
 	SetRegionCacheTTLSec(2)
 
 	bo := retry.NewBackoffer(context.Background(), -1)
@@ -1713,7 +1723,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestTiKVRecoveredFromDown() {
 		s.Require().NotEqual(addr, downStore.Address)
 		return &tikvrpc.Response{Resp: &kvrpcpb.GetResponse{Value: []byte(addr)}}, nil
 	}}
-	for i := 0; i < 30; i++ {
+	for i := 0; i < 15; i++ {
 		time.Sleep(200 * time.Millisecond)
 		loc, err := s.cache.LocateKey(bo, key)
 		s.Require().Nil(err)
@@ -1726,7 +1736,7 @@
 	s.regionRequestSender.client = &fnClient{fn: func(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
 		return &tikvrpc.Response{Resp: &kvrpcpb.GetResponse{Value: []byte(addr)}}, nil
 	}}
-	for i := 0; i < 30; i++ {
+	for i := 0; i < 15; i++ {
 		time.Sleep(200 * time.Millisecond)
 		loc, err := s.cache.LocateKey(bo, key)
 		s.Require().Nil(err)
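
The TODO repeated in these tests points out that cache.Close returns before its background
goroutines exit, which is why each onClosed hook sleeps for a second before restoring the
region-cache TTL. The sketch below is not part of client-go; the jobRunner type and its methods
are hypothetical and only illustrate the close-then-wait pattern (cancel a context, then block on
a sync.WaitGroup) that the "refine background job management" remark suggests, under the
assumption that the background work is a simple periodic loop.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// jobRunner is a hypothetical helper showing how background jobs could be tracked
// so that Close only returns once every goroutine has exited.
type jobRunner struct {
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

func newJobRunner() *jobRunner {
	ctx, cancel := context.WithCancel(context.Background())
	return &jobRunner{ctx: ctx, cancel: cancel}
}

// spawn starts a periodic background job that stops when the runner is closed.
func (r *jobRunner) spawn(interval time.Duration, job func()) {
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-r.ctx.Done():
				return // exit promptly when Close is called
			case <-ticker.C:
				job()
			}
		}
	}()
}

// Close cancels all jobs and blocks until they have returned, so a test's
// TearDownTest would not need an extra sleep to avoid data races.
func (r *jobRunner) Close() {
	r.cancel()
	r.wg.Wait()
}

func main() {
	r := newJobRunner()
	r.spawn(10*time.Millisecond, func() { fmt.Println("refresh regions") })
	time.Sleep(50 * time.Millisecond)
	r.Close() // returns only after the background goroutine has exited
	fmt.Println("closed; no background goroutine is still running")
}

With a Close of this shape, the onClosed hooks in this patch could drop the time.Sleep and simply
restore the TTL after Close returns, since no background goroutine would still be reading it.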