From 66967bc7df7f1cf3f6097108a35fda43cf37853f Mon Sep 17 00:00:00 2001
From: okJiang <819421878@qq.com>
Date: Thu, 2 Jan 2025 15:35:42 +0800
Subject: [PATCH] This is an automated cherry-pick of #8951

close tikv/pd#8950

Signed-off-by: ti-chi-bot
---
 client/clients/tso/client.go             | 587 +++++++++++++++++++++++
 client/go.mod                            |   5 +
 client/go.sum                            |  11 +-
 go.mod                                   |   5 +
 go.sum                                   |   5 +-
 server/grpc_service.go                   |  38 ++
 server/server.go                         |  12 +
 tests/integrations/client/client_test.go |  98 ++++
 tests/integrations/client/go.mod         |  13 +
 tests/integrations/mcs/go.mod            |   9 +
 tests/integrations/mcs/go.sum            |  12 +-
 tests/integrations/tso/go.sum            |  12 +-
 12 files changed, 796 insertions(+), 11 deletions(-)
 create mode 100644 client/clients/tso/client.go

diff --git a/client/clients/tso/client.go b/client/clients/tso/client.go
new file mode 100644
index 00000000000..d24dba52394
--- /dev/null
+++ b/client/clients/tso/client.go
@@ -0,0 +1,587 @@
+// Copyright 2023 TiKV Project Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tso
+
+import (
+	"context"
+	"math/rand"
+	"runtime/trace"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/status"
+
+	"github.com/pingcap/errors"
+	"github.com/pingcap/failpoint"
+	"github.com/pingcap/log"
+
+	"github.com/tikv/pd/client/constants"
+	"github.com/tikv/pd/client/errs"
+	"github.com/tikv/pd/client/metrics"
+	"github.com/tikv/pd/client/opt"
+	cctx "github.com/tikv/pd/client/pkg/connectionctx"
+	"github.com/tikv/pd/client/pkg/utils/grpcutil"
+	"github.com/tikv/pd/client/pkg/utils/tlsutil"
+	sd "github.com/tikv/pd/client/servicediscovery"
+)
+
+const (
+	// defaultMaxTSOBatchSize is the default max size of the TSO request batch.
+	defaultMaxTSOBatchSize = 10000
+	dispatchRetryDelay     = 50 * time.Millisecond
+	dispatchRetryCount     = 2
+)
+
+// Client defines the interface of a TSO client.
+type Client interface {
+	// GetTS gets a timestamp from PD or the TSO microservice.
+	GetTS(ctx context.Context) (int64, int64, error)
+	// GetTSAsync gets a timestamp from PD or the TSO microservice, without blocking the caller.
+	GetTSAsync(ctx context.Context) TSFuture
+	// GetMinTS gets a timestamp from PD or the minimal timestamp across all keyspace groups from
+	// the TSO microservice.
+	GetMinTS(ctx context.Context) (int64, int64, error)
+
+	// Deprecated: the Local TSO feature has been deprecated. Regardless of the
+	// parameters passed, the behavior of this interface will be equivalent to
+	// `GetTS`. If you want to use a separately deployed TSO service,
+	// please refer to the deployment of the TSO microservice.
+	GetLocalTS(ctx context.Context, _ string) (int64, int64, error)
+	// Deprecated: the Local TSO feature has been deprecated. Regardless of the
+	// parameters passed, the behavior of this interface will be equivalent to
+	// `GetTSAsync`. 
If you want to use a separately deployed TSO service, + // please refer to the deployment of the TSO microservice. + GetLocalTSAsync(ctx context.Context, _ string) TSFuture +} + +// Cli is the implementation of the TSO client. +type Cli struct { + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + option *opt.Option + + svcDiscovery sd.ServiceDiscovery + tsoStreamBuilderFactory + // leaderURL is the URL of the TSO leader. + leaderURL atomic.Value + conCtxMgr *cctx.Manager[*tsoStream] + updateConCtxsCh chan struct{} + + // tsoReqPool is the pool to recycle `*tsoRequest`. + tsoReqPool *sync.Pool + // dispatcher is used to dispatch the TSO requests to the channel. + dispatcher atomic.Pointer[tsoDispatcher] +} + +// NewClient returns a new TSO client. +func NewClient( + ctx context.Context, option *opt.Option, + svcDiscovery sd.ServiceDiscovery, factory tsoStreamBuilderFactory, +) *Cli { + ctx, cancel := context.WithCancel(ctx) + c := &Cli{ + ctx: ctx, + cancel: cancel, + option: option, + svcDiscovery: svcDiscovery, + tsoStreamBuilderFactory: factory, + conCtxMgr: cctx.NewManager[*tsoStream](), + updateConCtxsCh: make(chan struct{}, 1), + tsoReqPool: &sync.Pool{ + New: func() any { + return &Request{ + done: make(chan error, 1), + physical: 0, + logical: 0, + } + }, + }, + } + + eventSrc := svcDiscovery.(sd.TSOEventSource) + eventSrc.SetTSOLeaderURLUpdatedCallback(c.updateTSOLeaderURL) + c.svcDiscovery.AddServiceURLsSwitchedCallback(c.scheduleUpdateTSOConnectionCtxs) + + return c +} + +func (c *Cli) getOption() *opt.Option { return c.option } + +func (c *Cli) getServiceDiscovery() sd.ServiceDiscovery { return c.svcDiscovery } + +func (c *Cli) getConnectionCtxMgr() *cctx.Manager[*tsoStream] { return c.conCtxMgr } + +func (c *Cli) getDispatcher() *tsoDispatcher { + return c.dispatcher.Load() +} + +// GetRequestPool gets the request pool of the TSO client. +func (c *Cli) GetRequestPool() *sync.Pool { + return c.tsoReqPool +} + +// Setup initializes the TSO client. +func (c *Cli) Setup() { + // Daemon goroutine to update the connectionCtxs periodically and handle the `connectionCtxs` update event. + go c.connectionCtxsUpdater() + if err := c.svcDiscovery.CheckMemberChanged(); err != nil { + log.Warn("[tso] failed to check member changed", errs.ZapError(err)) + } + c.tryCreateTSODispatcher() +} + +// Close closes the TSO client +func (c *Cli) Close() { + if c == nil { + return + } + log.Info("[tso] closing tso client") + + c.cancel() + c.wg.Wait() + + log.Info("[tso] close tso client") + c.getDispatcher().close() + log.Info("[tso] tso client is closed") +} + +// scheduleUpdateTSOConnectionCtxs schedules the update of the TSO connection contexts. +func (c *Cli) scheduleUpdateTSOConnectionCtxs() { + select { + case c.updateConCtxsCh <- struct{}{}: + default: + } +} + +// GetTSORequest gets a TSO request from the pool. +func (c *Cli) GetTSORequest(ctx context.Context) *Request { + req := c.tsoReqPool.Get().(*Request) + // Set needed fields in the request before using it. + req.start = time.Now() + req.pool = c.tsoReqPool + req.requestCtx = ctx + req.clientCtx = c.ctx + req.physical = 0 + req.logical = 0 + req.streamID = "" + return req +} + +func (c *Cli) getLeaderURL() string { + url := c.leaderURL.Load() + if url == nil { + return "" + } + return url.(string) +} + +// getTSOLeaderClientConn returns the TSO leader gRPC client connection. 
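+// It returns the cached gRPC connection together with the leader URL. The
+// connection may be nil if it has not been created yet; the caller (see
+// tryConnectToTSO) treats a nil connection as a network error and retries.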
+func (c *Cli) getTSOLeaderClientConn() (*grpc.ClientConn, string) {
+	url := c.getLeaderURL()
+	if len(url) == 0 {
+		log.Fatal("[tso] the tso leader should exist")
+	}
+	cc, ok := c.svcDiscovery.GetClientConns().Load(url)
+	if !ok {
+		return nil, url
+	}
+	return cc.(*grpc.ClientConn), url
+}
+
+func (c *Cli) updateTSOLeaderURL(url string) error {
+	c.leaderURL.Store(url)
+	log.Info("[tso] switch the tso leader serving url", zap.String("new-url", url))
+	// Try to create the TSO dispatcher if it is not created yet.
+	c.tryCreateTSODispatcher()
+	// Update the TSO connection contexts after the dispatcher is ready.
+	c.scheduleUpdateTSOConnectionCtxs()
+	return nil
+}
+
+// backupClientConn gets a grpc client connection to one of the currently reachable
+// and healthy backup service endpoints, chosen randomly. Backup service endpoints are
+// followers in a quorum-based cluster or secondaries in a primary/secondary configured cluster.
+func (c *Cli) backupClientConn() (*grpc.ClientConn, string) {
+	urls := c.svcDiscovery.GetBackupURLs()
+	if len(urls) < 1 {
+		return nil, ""
+	}
+	var (
+		cc  *grpc.ClientConn
+		err error
+	)
+	for range urls {
+		url := urls[rand.Intn(len(urls))]
+		if cc, err = c.svcDiscovery.GetOrCreateGRPCConn(url); err != nil {
+			continue
+		}
+		healthCtx, healthCancel := context.WithTimeout(c.ctx, c.option.Timeout)
+		resp, err := healthpb.NewHealthClient(cc).Check(healthCtx, &healthpb.HealthCheckRequest{Service: ""})
+		healthCancel()
+		if err == nil && resp.GetStatus() == healthpb.HealthCheckResponse_SERVING {
+			return cc, url
+		}
+	}
+	return nil, ""
+}
+
+// connectionCtxsUpdater updates the `connectionCtxs` regularly.
+func (c *Cli) connectionCtxsUpdater() {
+	log.Info("[tso] start tso connection contexts updater")
+
+	var updateTicker = &time.Ticker{}
+	setNewUpdateTicker := func(interval time.Duration) {
+		if updateTicker.C != nil {
+			updateTicker.Stop()
+		}
+		if interval == 0 {
+			updateTicker = &time.Ticker{}
+		} else {
+			updateTicker = time.NewTicker(interval)
+		}
+	}
+	// If the TSO Follower Proxy is enabled, set the update interval to the member update interval.
+	if c.option.GetEnableTSOFollowerProxy() {
+		setNewUpdateTicker(sd.MemberUpdateInterval)
+	}
+	// Reset the ticker before returning to ensure that the existing ticker can be GCed.
+	defer setNewUpdateTicker(0)
+
+	ctx, cancel := context.WithCancel(c.ctx)
+	defer cancel()
+	for {
+		c.updateConnectionCtxs(ctx)
+		select {
+		case <-ctx.Done():
+			log.Info("[tso] exit tso connection contexts updater")
+			return
+		case <-c.option.EnableTSOFollowerProxyCh:
+			enableTSOFollowerProxy := c.option.GetEnableTSOFollowerProxy()
+			log.Info("[tso] tso follower proxy status changed",
+				zap.Bool("enable", enableTSOFollowerProxy))
+			if enableTSOFollowerProxy && updateTicker.C == nil {
+				// Because the TSO Follower Proxy is enabled,
+				// the periodic check needs to be performed.
+				setNewUpdateTicker(sd.MemberUpdateInterval)
+				failpoint.Inject("speedUpTsoDispatcherUpdateInterval", func() {
+					setNewUpdateTicker(10 * time.Millisecond)
+				})
+			} else if !enableTSOFollowerProxy && updateTicker.C != nil {
+				// Because the TSO Follower Proxy is disabled,
+				// the periodic check needs to be turned off.
+				setNewUpdateTicker(0)
+			}
+		case <-updateTicker.C:
+			// Triggered periodically when the TSO Follower Proxy is enabled.
+		case <-c.updateConCtxsCh:
+			// Triggered by the leader/follower change.
+		}
+	}
+}
+
+// updateConnectionCtxs will choose the proper way to update the connections.
+// It will return a bool to indicate whether the update is successful. 
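+// When the TSO Follower Proxy is enabled, it creates streams to all service
+// endpoints via tryConnectToTSOWithProxy; otherwise it only maintains the
+// leader stream via tryConnectToTSO.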
+func (c *Cli) updateConnectionCtxs(ctx context.Context) bool {
+	// Normal connection creation; it will be affected by the `enableForwarding` option.
+	createTSOConnection := c.tryConnectToTSO
+	if c.option.GetEnableTSOFollowerProxy() {
+		createTSOConnection = c.tryConnectToTSOWithProxy
+	}
+	if err := createTSOConnection(ctx); err != nil {
+		log.Error("[tso] update connection contexts failed", errs.ZapError(err))
+		return false
+	}
+	return true
+}
+
+// tryConnectToTSO will try to connect to the TSO leader. If the connection becomes unreachable
+// and enableForwarding is true, it will create a new connection to a follower to do the forwarding,
+// and a new daemon will also be created to switch back to a normal leader connection as soon as
+// the connection comes back to normal.
+func (c *Cli) tryConnectToTSO(ctx context.Context) error {
+	var (
+		networkErrNum uint64
+		err           error
+		stream        *tsoStream
+		url           string
+		cc            *grpc.ClientConn
+	)
+
+	ticker := time.NewTicker(constants.RetryInterval)
+	defer ticker.Stop()
+	// Retry several times before falling back to the follower when a network problem happens.
+	for range constants.MaxRetryTimes {
+		c.svcDiscovery.ScheduleCheckMemberChanged()
+		cc, url = c.getTSOLeaderClientConn()
+		if c.conCtxMgr.Exist(url) {
+			// Just trigger the clean up of the stale connection contexts.
+			c.conCtxMgr.CleanAllAndStore(ctx, url)
+			return nil
+		}
+		if cc != nil {
+			cctx, cancel := context.WithCancel(ctx)
+			stream, err = c.tsoStreamBuilderFactory.makeBuilder(cc).build(cctx, cancel, c.option.Timeout)
+			failpoint.Inject("unreachableNetwork", func() {
+				stream = nil
+				err = status.New(codes.Unavailable, "unavailable").Err()
+			})
+			if stream != nil && err == nil {
+				c.conCtxMgr.CleanAllAndStore(ctx, url, stream)
+				return nil
+			}
+
+			if err != nil && c.option.EnableForwarding {
+				// The reason we need to check whether the error code is equal to "Canceled" here is that
+				// when we create a stream, we use a goroutine to manually control the timeout of the connection.
+				// There is no need to wait for the transport layer timeout, which can reduce the time of unavailability.
+				// But it conflicts with the retry mechanism since we use the error code to decide if it is caused by a network error.
+				// And actually the `Canceled` error can be regarded as a kind of network error in some way. 
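+				// Hence both real network errors and `Canceled` are counted
+				// towards `networkErrNum`, which drives the follower fallback
+				// below.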
+				if rpcErr, ok := status.FromError(err); ok && (errs.IsNetworkError(rpcErr.Code()) || rpcErr.Code() == codes.Canceled) {
+					networkErrNum++
+				}
+			}
+			cancel()
+		} else {
+			networkErrNum++
+		}
+		select {
+		case <-ctx.Done():
+			return err
+		case <-ticker.C:
+		}
+	}
+
+	if networkErrNum == constants.MaxRetryTimes {
+		// All retries encountered network errors, so fall back to a follower.
+		backupClientConn, backupURL := c.backupClientConn()
+		if backupClientConn != nil {
+			log.Info("[tso] fall back to use follower to forward tso stream", zap.String("follower-url", backupURL))
+			forwardedHost := c.getLeaderURL()
+			if len(forwardedHost) == 0 {
+				return errors.Errorf("cannot find the tso leader")
+			}
+
+			// create the follower stream
+			cctx, cancel := context.WithCancel(ctx)
+			cctx = grpcutil.BuildForwardContext(cctx, forwardedHost)
+			stream, err = c.tsoStreamBuilderFactory.makeBuilder(backupClientConn).build(cctx, cancel, c.option.Timeout)
+			if err == nil {
+				forwardedHostTrim := tlsutil.TrimHTTPPrefix(forwardedHost)
+				addr := tlsutil.TrimHTTPPrefix(backupURL)
+				// the goroutine is used to check the network and change back to the original stream
+				go c.checkLeader(ctx, cancel, forwardedHostTrim, addr, url)
+				metrics.RequestForwarded.WithLabelValues(forwardedHostTrim, addr).Set(1)
+				c.conCtxMgr.CleanAllAndStore(ctx, backupURL, stream)
+				return nil
+			}
+			cancel()
+		}
+	}
+	return err
+}
+
+func (c *Cli) checkLeader(
+	ctx context.Context,
+	forwardCancel context.CancelFunc,
+	forwardedHostTrim, addr, url string,
+) {
+	defer func() {
+		// cancel the forward stream
+		forwardCancel()
+		metrics.RequestForwarded.WithLabelValues(forwardedHostTrim, addr).Set(0)
+	}()
+	cc, u := c.getTSOLeaderClientConn()
+	var healthCli healthpb.HealthClient
+	ticker := time.NewTicker(time.Second)
+	defer ticker.Stop()
+	for {
+		// the tso leader changed, we need to re-establish the stream
+		if u != url {
+			log.Info("[tso] the tso leader is changed", zap.String("origin", url), zap.String("new", u))
+			return
+		}
+		if healthCli == nil && cc != nil {
+			healthCli = healthpb.NewHealthClient(cc)
+		}
+		if healthCli != nil {
+			healthCtx, healthCancel := context.WithTimeout(ctx, c.option.Timeout)
+			resp, err := healthCli.Check(healthCtx, &healthpb.HealthCheckRequest{Service: ""})
+			failpoint.Inject("unreachableNetwork", func() {
+				resp.Status = healthpb.HealthCheckResponse_UNKNOWN
+			})
+			healthCancel()
+			if err == nil && resp.GetStatus() == healthpb.HealthCheckResponse_SERVING {
+				// create a stream of the original tso leader
+				cctx, cancel := context.WithCancel(ctx)
+				stream, err := c.tsoStreamBuilderFactory.makeBuilder(cc).build(cctx, cancel, c.option.Timeout)
+				if err == nil && stream != nil {
+					log.Info("[tso] recover the original tso stream since the network has become normal", zap.String("url", url))
+					c.conCtxMgr.CleanAllAndStore(ctx, url, stream)
+					return
+				}
+			}
+		}
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			// Re-fetch the latest tso leader so that we can exit this function once it changes.
+			cc, u = c.getTSOLeaderClientConn()
+		}
+	}
+}
+
+// tryConnectToTSOWithProxy will create multiple streams to all the service endpoints to work as
+// a TSO proxy to reduce the pressure of the main serving service endpoint.
+func (c *Cli) tryConnectToTSOWithProxy(ctx context.Context) error {
+	tsoStreamBuilders := c.getAllTSOStreamBuilders()
+	leaderAddr := c.svcDiscovery.GetServingURL()
+	forwardedHost := c.getLeaderURL()
+	if len(forwardedHost) == 0 {
+		return errors.Errorf("cannot find the tso leader")
+	}
+	// GC the stale ones. 
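+	// The predicate below returns true exactly for the addresses that no longer
+	// have a stream builder, i.e. endpoints that have disappeared from service
+	// discovery, so that their connection contexts get released.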
+	c.conCtxMgr.GC(func(addr string) bool {
+		_, ok := tsoStreamBuilders[addr]
+		if !ok {
+			log.Info("[tso] remove the stale tso stream",
+				zap.String("addr", addr))
+		}
+		return !ok
+	})
+	// Update the missing ones.
+	for addr, tsoStreamBuilder := range tsoStreamBuilders {
+		if c.conCtxMgr.Exist(addr) {
+			continue
+		}
+		log.Info("[tso] try to create tso stream", zap.String("addr", addr))
+		cctx, cancel := context.WithCancel(ctx)
+		// Do not proxy the leader client.
+		if addr != leaderAddr {
+			log.Info("[tso] use follower to forward tso stream to do the proxy",
+				zap.String("addr", addr))
+			cctx = grpcutil.BuildForwardContext(cctx, forwardedHost)
+		}
+		// Create the TSO stream.
+		stream, err := tsoStreamBuilder.build(cctx, cancel, c.option.Timeout)
+		if err == nil {
+			if addr != leaderAddr {
+				forwardedHostTrim := tlsutil.TrimHTTPPrefix(forwardedHost)
+				addrTrim := tlsutil.TrimHTTPPrefix(addr)
+				metrics.RequestForwarded.WithLabelValues(forwardedHostTrim, addrTrim).Set(1)
+			}
+			c.conCtxMgr.Store(ctx, addr, stream)
+			continue
+		}
+		log.Error("[tso] create the tso stream failed",
+			zap.String("addr", addr), errs.ZapError(err))
+		cancel()
+	}
+	return nil
+}
+
+// getAllTSOStreamBuilders returns a TSO stream builder for every service endpoint of TSO leader/followers
+// or of keyspace group primary/secondaries.
+func (c *Cli) getAllTSOStreamBuilders() map[string]tsoStreamBuilder {
+	var (
+		addrs          = c.svcDiscovery.GetServiceURLs()
+		streamBuilders = make(map[string]tsoStreamBuilder, len(addrs))
+		cc             *grpc.ClientConn
+		err            error
+	)
+	for _, addr := range addrs {
+		if len(addr) == 0 {
+			continue
+		}
+		if cc, err = c.svcDiscovery.GetOrCreateGRPCConn(addr); err != nil {
+			continue
+		}
+		healthCtx, healthCancel := context.WithTimeout(c.ctx, c.option.Timeout)
+		resp, err := healthpb.NewHealthClient(cc).Check(healthCtx, &healthpb.HealthCheckRequest{Service: ""})
+		healthCancel()
+		if err == nil && resp.GetStatus() == healthpb.HealthCheckResponse_SERVING {
+			streamBuilders[addr] = c.tsoStreamBuilderFactory.makeBuilder(cc)
+		}
+	}
+	return streamBuilders
+}
+
+// tryCreateTSODispatcher will try to create the TSO dispatcher if it is not created yet.
+func (c *Cli) tryCreateTSODispatcher() {
+	// The dispatcher is already created.
+	if c.getDispatcher() != nil {
+		return
+	}
+	// The TSO leader is not ready.
+	url := c.getLeaderURL()
+	if len(url) == 0 {
+		return
+	}
+	dispatcher := newTSODispatcher(c.ctx, defaultMaxTSOBatchSize, c)
+	c.wg.Add(1)
+	go dispatcher.handleDispatcher(&c.wg)
+	// Try to set the dispatcher atomically.
+	if swapped := c.dispatcher.CompareAndSwap(nil, dispatcher); !swapped {
+		dispatcher.close()
+	}
+}
+
+// DispatchRequest will send the TSO request to the corresponding TSO dispatcher.
+func (c *Cli) DispatchRequest(request *Request) (bool, error) {
+	if c.getDispatcher() == nil {
+		err := errs.ErrClientGetTSO.FastGenByArgs("tso dispatcher is not ready")
+		log.Error("[tso] dispatch tso request error", errs.ZapError(err))
+		c.svcDiscovery.ScheduleCheckMemberChanged()
+		// A new dispatcher could be created in the meantime, which is retryable.
+		return true, err
+	}
+
+	defer trace.StartRegion(request.requestCtx, "pdclient.tsoReqEnqueue").End()
+	select {
+	case <-request.requestCtx.Done():
+		// Caller cancelled the request, no need to retry.
+		return false, request.requestCtx.Err()
+	case <-request.clientCtx.Done():
+		// Client is closed, no need to retry. 
+		return false, request.clientCtx.Err()
+	case <-c.ctx.Done():
+		// tsoClient is closed due to the PD service mode switch, which is retryable.
+		return true, c.ctx.Err()
+	default:
+		// This failpoint will increase the possibility that the request is sent to a closed dispatcher.
+		failpoint.Inject("delayDispatchTSORequest", func() {
+			time.Sleep(time.Second)
+		})
+		c.getDispatcher().push(request)
+	}
+	// Check the contexts again to make sure the request has not been sent to a closed dispatcher.
+	// Never retry on these conditions to prevent an unexpected data race.
+	select {
+	case <-request.requestCtx.Done():
+		return false, request.requestCtx.Err()
+	case <-request.clientCtx.Done():
+		return false, request.clientCtx.Err()
+	case <-c.ctx.Done():
+		return false, c.ctx.Err()
+	default:
+	}
+	return false, nil
+}
diff --git a/client/go.mod b/client/go.mod
index 423f390c72c..4e1208cd06d 100644
--- a/client/go.mod
+++ b/client/go.mod
@@ -8,8 +8,13 @@ require (
 	github.com/gogo/protobuf v1.3.2
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c
+<<<<<<< HEAD
 	github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00
 	github.com/pingcap/kvproto v0.0.0-20230727073445-53e1f8730c30
+=======
+	github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86
+	github.com/pingcap/kvproto v0.0.0-20241120071417-b5b7843d9037
+>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951))
 	github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3
 	github.com/prometheus/client_golang v1.18.0
 	github.com/stretchr/testify v1.8.2
diff --git a/client/go.sum b/client/go.sum
index ddfa97ed184..f527caabe88 100644
--- a/client/go.sum
+++ b/client/go.sum
@@ -39,13 +39,19 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4=
 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg=
+<<<<<<< HEAD
 github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0=
 github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew=
 github.com/pingcap/kvproto v0.0.0-20230727073445-53e1f8730c30 h1:EvqKcDT7ceGLW0mXqM8Cp5Z8DfgQRnwj2YTnlCLj2QI=
 github.com/pingcap/kvproto v0.0.0-20230727073445-53e1f8730c30/go.mod h1:r0q/CFcwvyeRhKtoqzmWMBebrtpIziQQ9vR+JKh1knc=
+=======
+github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 h1:tdMsjOqUR7YXHoBitzdebTvOjs/swniBTOLy5XiMtuE=
+github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86/go.mod h1:exzhVYca3WRtd6gclGNErRWb1qEgff3LYta0LvRmON4=
+github.com/pingcap/kvproto v0.0.0-20241120071417-b5b7843d9037 h1:xYNSJjYNur4Dr5bV+9BXK9n5E0T1zlcAN25XX68+mOg=
+github.com/pingcap/kvproto v0.0.0-20241120071417-b5b7843d9037/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8=
+>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951))
 github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 h1:HR/ylkkLmGdSSDaD8IDP+SZrdhV1Kibl9KrHxJ9eciw=
 github.com/pingcap/log 
v1.1.1-0.20221110025148-ca232912c9f3/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -63,7 +69,6 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= @@ -153,7 +158,6 @@ google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7 google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -162,7 +166,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYs gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/go.mod b/go.mod index 242b3535eb3..3c0ffed0a82 100644 --- a/go.mod +++ b/go.mod @@ -38,8 +38,13 @@ require ( github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d github.com/pingcap/errcode v0.3.0 github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c +<<<<<<< HEAD github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 github.com/pingcap/kvproto v0.0.0-20230920042517-db656f45023b +======= + github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 + github.com/pingcap/kvproto v0.0.0-20241120071417-b5b7843d9037 +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)) github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 github.com/pingcap/tidb-dashboard v0.0.0-20241212093248-834dbcafa291 diff --git a/go.sum b/go.sum index 89e77d3b1d3..7e849fdcec2 100644 --- a/go.sum +++ b/go.sum @@ -421,12 +421,11 @@ github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JH github.com/pingcap/errcode v0.3.0 h1:IF6LC/4+b1KNwrMlr2rBTUrojFPMexXBcDWZSpNwxjg= github.com/pingcap/errcode v0.3.0/go.mod 
h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= -github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= -github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= +github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 h1:tdMsjOqUR7YXHoBitzdebTvOjs/swniBTOLy5XiMtuE= +github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86/go.mod h1:exzhVYca3WRtd6gclGNErRWb1qEgff3LYta0LvRmON4= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20230920042517-db656f45023b h1:XwwIxepR+uuSYWhdQtstEdr67XUE7X6lpSIHVh5iWjs= github.com/pingcap/kvproto v0.0.0-20230920042517-db656f45023b/go.mod h1:r0q/CFcwvyeRhKtoqzmWMBebrtpIziQQ9vR+JKh1knc= diff --git a/server/grpc_service.go b/server/grpc_service.go index 81b55c04e61..a3a1876a66a 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -193,7 +193,14 @@ func (s *GrpcServer) GetMinTS( // GetMinTSFromTSOService queries all tso servers and gets the minimum timestamp across // all keyspace groups. +<<<<<<< HEAD func (s *GrpcServer) GetMinTSFromTSOService(dcLocation string) (*pdpb.Timestamp, error) { +======= +func (s *GrpcServer) GetMinTSFromTSOService() (*pdpb.Timestamp, error) { + if s.IsClosed() { + return nil, errs.ErrNotStarted + } +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)) addrs := s.keyspaceGroupManager.GetTSOServiceAddrs() if len(addrs) == 0 { return &pdpb.Timestamp{}, errs.ErrGetMinTS.FastGenByArgs("no tso servers/pods discovered") @@ -392,9 +399,19 @@ func (s *GrpcServer) Tso(stream pdpb.PD_TsoServer) error { return errors.WithStack(err) } +<<<<<<< HEAD if forwardedHost, err := s.getForwardedHost(ctx, stream.Context()); err != nil { return err } else if len(forwardedHost) > 0 { +======= + // TSO uses leader lease to determine validity. No need to check leader here. + if s.IsClosed() { + return errs.ErrNotStarted + } + + forwardedHost := grpcutil.GetForwardedHost(stream.Context()) + if !s.isLocalRequest(forwardedHost) { +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)) clientConn, err := s.getDelegateClient(s.ctx, forwardedHost) if err != nil { return errors.WithStack(err) @@ -412,6 +429,7 @@ func (s *GrpcServer) Tso(stream pdpb.PD_TsoServer) error { } start := time.Now() +<<<<<<< HEAD // TSO uses leader lease to determine validity. No need to check leader here. 
if s.IsClosed() { return status.Errorf(codes.Unknown, "server not started") @@ -419,6 +437,10 @@ func (s *GrpcServer) Tso(stream pdpb.PD_TsoServer) error { if request.GetHeader().GetClusterId() != s.clusterID { return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", s.clusterID, request.GetHeader().GetClusterId()) +======= + if clusterID := keypath.ClusterID(); request.GetHeader().GetClusterId() != clusterID { + return errs.ErrMismatchClusterID(clusterID, request.GetHeader().GetClusterId()) +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)) } count := request.GetCount() ctx, task := trace.NewTask(ctx, "tso") @@ -778,7 +800,23 @@ func (s *GrpcServer) AllocID(ctx context.Context, request *pdpb.AllocIDRequest) } // IsSnapshotRecovering implements gRPC PDServer. +<<<<<<< HEAD func (s *GrpcServer) IsSnapshotRecovering(ctx context.Context, request *pdpb.IsSnapshotRecoveringRequest) (*pdpb.IsSnapshotRecoveringResponse, error) { +======= +func (s *GrpcServer) IsSnapshotRecovering(ctx context.Context, _ *pdpb.IsSnapshotRecoveringRequest) (*pdpb.IsSnapshotRecoveringResponse, error) { + if s.GetServiceMiddlewarePersistOptions().IsGRPCRateLimitEnabled() { + fName := currentFunction() + limiter := s.GetGRPCRateLimiter() + if done, err := limiter.Allow(fName); err == nil { + defer done() + } else { + return nil, errs.ErrGRPCRateLimitExceeded(err) + } + } + if s.IsClosed() { + return nil, errs.ErrNotStarted + } +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)) // recovering mark is stored in etcd directly, there's no need to forward. marked, err := s.Server.IsSnapshotRecovering(ctx) if err != nil { diff --git a/server/server.go b/server/server.go index b30173209d3..7f7c038f7a7 100644 --- a/server/server.go +++ b/server/server.go @@ -503,6 +503,18 @@ func (s *Server) startServer(ctx context.Context) error { cb() } +<<<<<<< HEAD +======= + // to init all rate limiter and metrics + for service := range s.serviceLabels { + s.serviceRateLimiter.Update(service, ratelimit.InitLimiter()) + } + for service := range s.grpcServiceLabels { + s.grpcServiceRateLimiter.Update(service, ratelimit.InitLimiter()) + } + + failpoint.InjectCall("delayStartServer") +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)) // Server has started. 
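+	// Note: the rate limiter initialization above must happen before the server
+	// is marked as running, so that requests admitted once IsClosed() starts
+	// returning false never observe partially initialized state.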
atomic.StoreInt64(&s.isRunning, 1) serverMaxProcs.Set(float64(runtime.GOMAXPROCS(0))) diff --git a/tests/integrations/client/client_test.go b/tests/integrations/client/client_test.go index bb4d6851fd0..1d834dae552 100644 --- a/tests/integrations/client/client_test.go +++ b/tests/integrations/client/client_test.go @@ -286,9 +286,19 @@ func TestTSOFollowerProxy(t *testing.T) { defer cluster.Destroy() endpoints := runServer(re, cluster) +<<<<<<< HEAD cli1 := setupCli(re, ctx, endpoints) cli2 := setupCli(re, ctx, endpoints) cli2.UpdateOption(pd.EnableTSOFollowerProxy, true) +======= + cli1 := setupCli(ctx, re, endpoints) + defer cli1.Close() + cli2 := setupCli(ctx, re, endpoints) + defer cli2.Close() + re.NoError(failpoint.Enable("github.com/tikv/pd/client/clients/tso/speedUpTsoDispatcherUpdateInterval", "return(true)")) + err = cli2.UpdateOption(opt.EnableTSOFollowerProxy, true) + re.NoError(err) +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)) var wg sync.WaitGroup wg.Add(tsoRequestConcurrencyNumber) @@ -312,6 +322,94 @@ func TestTSOFollowerProxy(t *testing.T) { }() } wg.Wait() +<<<<<<< HEAD +======= + + followerServer := cluster.GetServer(cluster.GetFollower()) + re.NoError(followerServer.Stop()) + ch := make(chan struct{}) + re.NoError(failpoint.EnableCall("github.com/tikv/pd/server/delayStartServer", func() { + // Server is not in `Running` state, so the follower proxy should return + // error while create stream. + ch <- struct{}{} + })) + wg.Add(1) + go func() { + defer wg.Done() + re.NoError(followerServer.Run()) + }() + re.Eventually(func() bool { + _, _, err := cli2.GetTS(context.Background()) + if err == nil { + return false + } + return strings.Contains(err.Error(), "server not started") + }, 3*time.Second, 10*time.Millisecond) + <-ch + re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayStartServer")) + re.NoError(failpoint.Disable("github.com/tikv/pd/client/clients/tso/speedUpTsoDispatcherUpdateInterval")) + wg.Wait() + + // Disable the follower proxy and check if the stream is updated. + err = cli2.UpdateOption(opt.EnableTSOFollowerProxy, false) + re.NoError(err) + + wg.Add(tsoRequestConcurrencyNumber) + for range tsoRequestConcurrencyNumber { + go func() { + defer wg.Done() + var lastTS uint64 + for range tsoRequestRound { + physical, logical, err := cli2.GetTS(context.Background()) + if err != nil { + // It can only be the context canceled error caused by the stale stream cleanup. + re.ErrorContains(err, "context canceled") + continue + } + re.NoError(err) + ts := tsoutil.ComposeTS(physical, logical) + re.Less(lastTS, ts) + lastTS = ts + // After requesting with the follower proxy, request with the leader directly. + physical, logical, err = cli1.GetTS(context.Background()) + re.NoError(err) + ts = tsoutil.ComposeTS(physical, logical) + re.Less(lastTS, ts) + lastTS = ts + } + // Ensure at least one request is successful. 
+ re.NotEmpty(lastTS) + }() + } + wg.Wait() +} + +func TestTSOFollowerProxyWithTSOService(t *testing.T) { + re := require.New(t) + re.NoError(failpoint.Enable("github.com/tikv/pd/client/servicediscovery/fastUpdateServiceMode", `return(true)`)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestAPICluster(ctx, 1) + re.NoError(err) + defer cluster.Destroy() + err = cluster.RunInitialServers() + re.NoError(err) + leaderName := cluster.WaitLeader() + pdLeaderServer := cluster.GetServer(leaderName) + re.NoError(pdLeaderServer.BootstrapCluster()) + backendEndpoints := pdLeaderServer.GetAddr() + tsoCluster, err := tests.NewTestTSOCluster(ctx, 2, backendEndpoints) + re.NoError(err) + defer tsoCluster.Destroy() + time.Sleep(100 * time.Millisecond) + cli := mcs.SetupClientWithKeyspaceID(ctx, re, constant.DefaultKeyspaceID, strings.Split(backendEndpoints, ",")) + re.NotNil(cli) + defer cli.Close() + // TSO service does not support the follower proxy, so enabling it should fail. + err = cli.UpdateOption(opt.EnableTSOFollowerProxy, true) + re.Error(err) + re.NoError(failpoint.Disable("github.com/tikv/pd/client/servicediscovery/fastUpdateServiceMode")) +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)) } // TestUnavailableTimeAfterLeaderIsReady is used to test https://github.com/tikv/pd/issues/5207 diff --git a/tests/integrations/client/go.mod b/tests/integrations/client/go.mod index 9ccec3da2e8..3b59e78f203 100644 --- a/tests/integrations/client/go.mod +++ b/tests/integrations/client/go.mod @@ -11,8 +11,21 @@ replace ( require ( github.com/docker/go-units v0.4.0 +<<<<<<< HEAD:tests/integrations/client/go.mod github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 github.com/pingcap/kvproto v0.0.0-20230920042517-db656f45023b +======= + github.com/gin-contrib/cors v1.6.0 + github.com/gin-contrib/gzip v0.0.1 + github.com/gin-contrib/pprof v1.4.0 + github.com/gin-gonic/gin v1.10.0 + github.com/go-echarts/go-echarts v1.0.0 + github.com/influxdata/tdigest v0.0.1 + github.com/mattn/go-shellwords v1.0.12 + github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c + github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 + github.com/pingcap/kvproto v0.0.0-20241120071417-b5b7843d9037 +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)):tools/go.mod github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/stretchr/testify v1.9.0 github.com/tikv/pd v0.0.0-00010101000000-000000000000 diff --git a/tests/integrations/mcs/go.mod b/tests/integrations/mcs/go.mod index ea0fc0bf83d..7f6805cadd3 100644 --- a/tests/integrations/mcs/go.mod +++ b/tests/integrations/mcs/go.mod @@ -10,9 +10,18 @@ replace ( // reset grpc and protobuf deps in order to import client and server at the same time require ( +<<<<<<< HEAD:tests/integrations/mcs/go.mod github.com/docker/go-units v0.4.0 github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 github.com/pingcap/kvproto v0.0.0-20230920042517-db656f45023b +======= + github.com/DATA-DOG/go-sqlmock v1.5.0 + github.com/docker/go-units v0.5.0 + github.com/go-sql-driver/mysql v1.7.0 + github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c + github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 + github.com/pingcap/kvproto v0.0.0-20241120071417-b5b7843d9037 +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)):tests/integrations/go.mod github.com/pingcap/log v1.1.1-0.20221110025148-ca232912c9f3 github.com/stretchr/testify v1.9.0 github.com/tikv/pd 
v0.0.0-00010101000000-000000000000 diff --git a/tests/integrations/mcs/go.sum b/tests/integrations/mcs/go.sum index 8332e46213c..d80ed19d353 100644 --- a/tests/integrations/mcs/go.sum +++ b/tests/integrations/mcs/go.sum @@ -389,12 +389,16 @@ github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JH github.com/pingcap/errcode v0.3.0 h1:IF6LC/4+b1KNwrMlr2rBTUrojFPMexXBcDWZSpNwxjg= github.com/pingcap/errcode v0.3.0/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= +<<<<<<< HEAD:tests/integrations/mcs/go.sum github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00 h1:C3N3itkduZXDZFh4N3vQ5HEtld3S+Y+StULhWVvumU0= github.com/pingcap/failpoint v0.0.0-20210918120811-547c13e3eb00/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= +======= +github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 h1:tdMsjOqUR7YXHoBitzdebTvOjs/swniBTOLy5XiMtuE= +github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86/go.mod h1:exzhVYca3WRtd6gclGNErRWb1qEgff3LYta0LvRmON4= +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)):tests/integrations/go.sum github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20230920042517-db656f45023b h1:XwwIxepR+uuSYWhdQtstEdr67XUE7X6lpSIHVh5iWjs= github.com/pingcap/kvproto v0.0.0-20230920042517-db656f45023b/go.mod h1:r0q/CFcwvyeRhKtoqzmWMBebrtpIziQQ9vR+JKh1knc= @@ -447,9 +451,14 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/samber/lo v1.37.0 h1:XjVcB8g6tgUp8rsPsJ2CvhClfImrpL04YpQHXeHPhRw= github.com/samber/lo v1.37.0/go.mod h1:9vaz2O4o8oOnK23pd2TrXufcbdbJIa3b6cstBWKpopA= +<<<<<<< HEAD:tests/integrations/mcs/go.sum github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +======= +github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= +github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)):tests/integrations/go.sum github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA= github.com/shirou/gopsutil/v3 v3.23.3 h1:Syt5vVZXUDXPEXpIBt5ziWsJ4LdSAAxF4l/xZeQgSEE= github.com/shirou/gopsutil/v3 v3.23.3/go.mod h1:lSBNN6t3+D6W5e5nXTxc8KIMMVxAcS+6IJlffjRRlMU= @@ -770,7 +779,6 @@ google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/tests/integrations/tso/go.sum b/tests/integrations/tso/go.sum index 0bce97b436d..1cddeb096f7 100644 --- a/tests/integrations/tso/go.sum +++ b/tests/integrations/tso/go.sum @@ -383,12 +383,16 @@ github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JH github.com/pingcap/errcode v0.3.0 h1:IF6LC/4+b1KNwrMlr2rBTUrojFPMexXBcDWZSpNwxjg= github.com/pingcap/errcode v0.3.0/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM= github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c h1:xpW9bvK+HuuTmyFqUwr+jcCvpVkK7sumiz+ko5H9eq4= github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= +<<<<<<< HEAD:tests/integrations/tso/go.sum github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c h1:CgbKAHto5CQgWM9fSBIvaxsJHuGP0uM74HXtv3MyyGQ= github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c/go.mod h1:4qGtCB0QK0wBzKtFEGDhxXnSnbQApw1gc9siScUl8ew= +======= +github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86 h1:tdMsjOqUR7YXHoBitzdebTvOjs/swniBTOLy5XiMtuE= +github.com/pingcap/failpoint v0.0.0-20240528011301-b51a646c7c86/go.mod h1:exzhVYca3WRtd6gclGNErRWb1qEgff3LYta0LvRmON4= +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)):tools/go.sum github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20230920042517-db656f45023b h1:XwwIxepR+uuSYWhdQtstEdr67XUE7X6lpSIHVh5iWjs= github.com/pingcap/kvproto v0.0.0-20230920042517-db656f45023b/go.mod h1:r0q/CFcwvyeRhKtoqzmWMBebrtpIziQQ9vR+JKh1knc= @@ -441,9 +445,14 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/samber/lo v1.37.0 h1:XjVcB8g6tgUp8rsPsJ2CvhClfImrpL04YpQHXeHPhRw= github.com/samber/lo v1.37.0/go.mod h1:9vaz2O4o8oOnK23pd2TrXufcbdbJIa3b6cstBWKpopA= +<<<<<<< HEAD:tests/integrations/tso/go.sum github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +======= +github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= +github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= +>>>>>>> 7a30ebc97 (server: advance ServerStart check (#8951)):tools/go.sum github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA= github.com/shirou/gopsutil/v3 v3.23.3 
h1:Syt5vVZXUDXPEXpIBt5ziWsJ4LdSAAxF4l/xZeQgSEE= github.com/shirou/gopsutil/v3 v3.23.3/go.mod h1:lSBNN6t3+D6W5e5nXTxc8KIMMVxAcS+6IJlffjRRlMU= @@ -764,7 +773,6 @@ google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
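
Note (illustration, not part of the patch): the sketch below shows how a caller could honor the retryable flag returned by `(*Cli).DispatchRequest` added in client/clients/tso/client.go above. It assumes an already-initialized `*tso.Cli` and that `Request` exposes the usual `Wait` method from the same client package; anything outside this patch is an assumption.

```go
package example

import (
	"context"

	tso "github.com/tikv/pd/client/clients/tso"
)

// getTS re-dispatches a TSO request while DispatchRequest reports the failure
// as retryable (dispatcher not ready yet, or a PD service mode switch), and
// returns non-retryable errors to the caller immediately.
func getTS(ctx context.Context, cli *tso.Cli) (physical, logical int64, err error) {
	var retryable bool
	for range 3 { // bounded number of re-dispatch attempts
		req := cli.GetTSORequest(ctx)
		retryable, err = cli.DispatchRequest(req)
		if err != nil {
			if retryable {
				continue
			}
			return 0, 0, err
		}
		// Wait blocks until the dispatcher batches and fulfills the request
		// (assumed helper; see the TSFuture interface in the client package).
		return req.Wait()
	}
	return 0, 0, err
}
```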