avoid data race in tests
Signed-off-by: zyguan <[email protected]>
zyguan committed Jan 16, 2024
1 parent 2c75a2e commit d745160
Showing 2 changed files with 33 additions and 7 deletions.
24 changes: 20 additions & 4 deletions internal/locate/region_cache_test.go
@@ -85,6 +85,7 @@ type testRegionCacheSuite struct {
 	region1 uint64
 	cache   *RegionCache
 	bo      *retry.Backoffer
+	onClosed func()
 }

 func (s *testRegionCacheSuite) SetupTest() {
@@ -104,6 +105,9 @@ func (s *testRegionCacheSuite) SetupTest() {
 func (s *testRegionCacheSuite) TearDownTest() {
 	s.cache.Close()
 	s.mvccStore.Close()
+	if s.onClosed != nil {
+		s.onClosed()
+	}
 }

 func (s *testRegionCacheSuite) storeAddr(id uint64) string {
@@ -306,6 +310,14 @@ func (s *testRegionCacheSuite) TestResolveStateTransition() {
 }

 func (s *testRegionCacheSuite) TestReloadRegionWithDownPeers() {
+	s.onClosed = func() {
+		// TODO(zyguan): cache.Close doesn't wait for exit of background goroutines, sleep a while to avoid data race.
+		// This is a quick-and-dirty workaround, we should refine background job management in the future.
+		time.Sleep(time.Second)
+		SetRegionCacheTTLSec(600)
+	}
+	SetRegionCacheTTLSec(2)
+
 	cntGetRegion := 0
 	s.cache.pdClient = &inspectedPDClient{
 		Client: s.cache.pdClient,
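Aside (not part of the commit): the race the workaround avoids has the shape below. The test goroutine rewrites a plain package-level setting while a background goroutine that outlived cache.Close is still reading it. A minimal, self-contained sketch with illustrative names; running it under go run -race reports the conflicting read/write pair.

package main

import "time"

// ttlSec stands in for a package-level setting such as the region cache TTL.
var ttlSec int64 = 600

func main() {
	done := make(chan struct{})
	// Background job that a Close() which doesn't wait would leave running.
	go func() {
		defer close(done)
		for i := 0; i < 100; i++ {
			_ = ttlSec // unsynchronized read
			time.Sleep(time.Millisecond)
		}
	}()
	ttlSec = 2 // unsynchronized write from the "test" goroutine: a data race
	<-done
}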
@@ -316,8 +328,6 @@
 	}
 	s.cluster.MarkPeerDown(s.peer2)

-	defer SetRegionCacheTTLSec(regionCacheTTLSec)
-	SetRegionCacheTTLSec(2)
 	for i := 0; i < 50; i++ {
 		time.Sleep(100 * time.Millisecond)
 		_, err := s.cache.LocateKey(s.bo, []byte("a"))
@@ -327,6 +337,14 @@
 }

 func (s *testRegionCacheSuite) TestTiFlashRecoveredFromDown() {
+	s.onClosed = func() {
+		// TODO(zyguan): cache.Close doesn't wait for exit of background goroutines, sleep a while to avoid data race.
+		// This is a quick-and-dirty workaround, we should refine background job management in the future.
+		time.Sleep(time.Second)
+		SetRegionCacheTTLSec(600)
+	}
+	SetRegionCacheTTLSec(3)
+
 	store3 := s.cluster.AllocID()
 	peer3 := s.cluster.AllocID()
 	s.cluster.AddStore(store3, s.storeAddr(store3))
@@ -357,8 +375,6 @@
 	region = s.cache.GetCachedRegionWithRLock(loc.Region)
 	s.Equal(region.hasDownPeers, true)

-	defer SetRegionCacheTTLSec(regionCacheTTLSec)
-	SetRegionCacheTTLSec(3)
 	for i := 0; i <= 3; i++ {
 		time.Sleep(1 * time.Second)
 		loc, err = s.cache.LocateKey(s.bo, []byte("a"))
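The TODO in both tests names the longer-term fix: make Close block until its background goroutines have exited, which would make the one-second sleep (and the onClosed hook itself) unnecessary. A minimal sketch of that shape, assuming a quit channel plus a sync.WaitGroup; the actual RegionCache internals may differ.

package main

import (
	"sync"
	"time"
)

type cache struct {
	quit chan struct{}
	wg   sync.WaitGroup
}

func newCache() *cache {
	c := &cache{quit: make(chan struct{})}
	c.wg.Add(1)
	go c.backgroundLoop()
	return c
}

func (c *cache) backgroundLoop() {
	defer c.wg.Done()
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-c.quit:
			return
		case <-ticker.C:
			// Periodic work that reads shared settings (e.g. the cache TTL).
		}
	}
}

// Close signals the background goroutine and waits for it to exit, so a
// caller may safely mutate shared settings as soon as Close returns.
func (c *cache) Close() {
	close(c.quit)
	c.wg.Wait()
}

func main() {
	c := newCache()
	c.Close() // returns only after backgroundLoop has exited
}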
16 changes: 13 additions & 3 deletions internal/locate/region_request3_test.go
@@ -75,9 +75,11 @@ type testRegionRequestToThreeStoresSuite struct {
 	bo                  *retry.Backoffer
 	regionRequestSender *RegionRequestSender
 	mvccStore           mocktikv.MVCCStore
+	onClosed            func()
 }

 func (s *testRegionRequestToThreeStoresSuite) SetupTest() {
+	SetRegionCacheTTLSec(600)
 	s.mvccStore = mocktikv.MustNewMVCCStore()
 	s.cluster = mocktikv.NewCluster(s.mvccStore)
 	s.storeIDs, s.peerIDs, s.regionID, s.leaderPeer = mocktikv.BootstrapWithMultiStores(s.cluster, 3)
@@ -91,6 +93,9 @@ func (s *testRegionRequestToThreeStoresSuite) SetupTest() {
 func (s *testRegionRequestToThreeStoresSuite) TearDownTest() {
 	s.cache.Close()
 	s.mvccStore.Close()
+	if s.onClosed != nil {
+		s.onClosed()
+	}
 }

 func (s *testRegionRequestToThreeStoresSuite) TestStoreTokenLimit() {
@@ -1697,7 +1702,12 @@ func (s *testRegionRequestToThreeStoresSuite) TestDoNotTryUnreachableLeader() {
 }

 func (s *testRegionRequestToThreeStoresSuite) TestTiKVRecoveredFromDown() {
-	defer SetRegionCacheTTLSec(regionCacheTTLSec)
+	s.onClosed = func() {
+		// TODO(zyguan): cache.Close doesn't wait for exit of background goroutines, sleep a while to avoid data race.
+		// This is a quick-and-dirty workaround, we should refine background job management in the future.
+		time.Sleep(time.Second)
+		SetRegionCacheTTLSec(600)
+	}
 	SetRegionCacheTTLSec(2)

 	bo := retry.NewBackoffer(context.Background(), -1)
@@ -1713,7 +1723,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestTiKVRecoveredFromDown() {
 		s.Require().NotEqual(addr, downStore.Address)
 		return &tikvrpc.Response{Resp: &kvrpcpb.GetResponse{Value: []byte(addr)}}, nil
 	}}
-	for i := 0; i < 30; i++ {
+	for i := 0; i < 15; i++ {
 		time.Sleep(200 * time.Millisecond)
 		loc, err := s.cache.LocateKey(bo, key)
 		s.Require().Nil(err)
@@ -1726,7 +1736,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestTiKVRecoveredFromDown() {
 	s.regionRequestSender.client = &fnClient{fn: func(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
 		return &tikvrpc.Response{Resp: &kvrpcpb.GetResponse{Value: []byte(addr)}}, nil
 	}}
-	for i := 0; i < 30; i++ {
+	for i := 0; i < 15; i++ {
 		time.Sleep(200 * time.Millisecond)
 		loc, err := s.cache.LocateKey(bo, key)
 		s.Require().Nil(err)
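Another way out, rather than patching every affected test, would be to make the shared setting itself safe for concurrent access, for example by storing it in an atomic. A sketch under that assumption; the setter name mirrors SetRegionCacheTTLSec from the diff for readability, but this is not how client-go stores the value today.

package main

import (
	"fmt"
	"sync/atomic"
)

// ttlSec is read by background goroutines and written by tests; the atomic
// makes that pair race-free even if Close doesn't wait for the readers.
var ttlSec atomic.Int64

func SetRegionCacheTTLSec(t int64) { ttlSec.Store(t) }

func regionCacheTTL() int64 { return ttlSec.Load() }

func main() {
	SetRegionCacheTTLSec(600)
	fmt.Println(regionCacheTTL()) // 600
	SetRegionCacheTTLSec(2)       // safe to call from any goroutine
	fmt.Println(regionCacheTTL()) // 2
}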
