diff --git a/arbnode/api.go b/arbnode/api.go
index 2dabd41bff..042266f9dd 100644
--- a/arbnode/api.go
+++ b/arbnode/api.go
@@ -19,7 +19,7 @@ type BlockValidatorAPI struct {
 }
 
 func (a *BlockValidatorAPI) LatestValidated(ctx context.Context) (*staker.GlobalStateValidatedInfo, error) {
-    return a.val.ReadLastValidatedInfo()
+    return a.val.ReadLastValidatedInfo(ctx)
 }
 
 type BlockValidatorDebugAPI struct {
diff --git a/arbnode/node.go b/arbnode/node.go
index c5b3bbe071..a162ce8a28 100644
--- a/arbnode/node.go
+++ b/arbnode/node.go
@@ -621,11 +621,13 @@ func createNodeImpl(
     var blockValidator *staker.BlockValidator
     if config.ValidatorRequired() {
         blockValidator, err = staker.NewBlockValidator(
+            ctx,
             statelessBlockValidator,
             inboxTracker,
             txStreamer,
             func() *staker.BlockValidatorConfig { return &configFetcher.Get().BlockValidator },
             fatalErrChan,
+            stack,
         )
         if err != nil {
             return nil, err
diff --git a/staker/block_validator.go b/staker/block_validator.go
index 5a1f123693..8a4535c0fd 100644
--- a/staker/block_validator.go
+++ b/staker/block_validator.go
@@ -21,6 +21,7 @@ import (
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
+    "github.com/ethereum/go-ethereum/node"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/offchainlabs/nitro/arbnode/resourcemanager"
     "github.com/offchainlabs/nitro/arbutil"
@@ -102,12 +103,15 @@ type BlockValidator struct {
     fatalErr chan<- error
 
     MemoryFreeLimitChecker resourcemanager.LimitChecker
+
+    remoteBlockValidatorClient *rpcclient.RpcClient
 }
 
 type BlockValidatorConfig struct {
     Enable                      bool                         `koanf:"enable"`
     RedisValidationClientConfig redis.ValidationClientConfig `koanf:"redis-validation-client-config"`
     ValidationServer            rpcclient.ClientConfig       `koanf:"validation-server" reload:"hot"`
+    RemoteBlockValidatorServer  rpcclient.ClientConfig       `koanf:"remote-block-validator-server" reload:"hot"`
     ValidationServerConfigs     []rpcclient.ClientConfig     `koanf:"validation-server-configs"`
     ValidationPoll              time.Duration                `koanf:"validation-poll" reload:"hot"`
     PrerecordedBlocks           uint64                       `koanf:"prerecorded-blocks" reload:"hot"`
@@ -178,6 +182,7 @@ type BlockValidatorConfigFetcher func() *BlockValidatorConfig
 func BlockValidatorConfigAddOptions(prefix string, f *pflag.FlagSet) {
     f.Bool(prefix+".enable", DefaultBlockValidatorConfig.Enable, "enable block-by-block validation")
     rpcclient.RPCClientAddOptions(prefix+".validation-server", f, &DefaultBlockValidatorConfig.ValidationServer)
+    rpcclient.RPCClientAddOptions(prefix+".remote-block-validator-server", f, &DefaultBlockValidatorConfig.RemoteBlockValidatorServer)
     redis.ValidationClientConfigAddOptions(prefix+".redis-validation-client-config", f)
     f.String(prefix+".validation-server-configs-list", DefaultBlockValidatorConfig.ValidationServerConfigsList, "array of execution rpc configs given as a json string. time duration should be supplied in number indicating nanoseconds")
     f.Duration(prefix+".validation-poll", DefaultBlockValidatorConfig.ValidationPoll, "poll time to check validations")
@@ -201,6 +206,7 @@ var DefaultBlockValidatorConfig = BlockValidatorConfig{
     Enable:                      false,
     ValidationServerConfigsList: "default",
     ValidationServer:            rpcclient.DefaultClientConfig,
+    RemoteBlockValidatorServer:  rpcclient.DefaultClientConfig,
     RedisValidationClientConfig: redis.DefaultValidationClientConfig,
     ValidationPoll:              time.Second,
     ForwardBlocks:               128,
@@ -273,20 +279,33 @@ func (s *validationStatus) profileStep() int64 {
 }
 
 func NewBlockValidator(
+    ctx context.Context,
     statelessBlockValidator *StatelessBlockValidator,
     inbox InboxTrackerInterface,
     streamer TransactionStreamerInterface,
     config BlockValidatorConfigFetcher,
     fatalErr chan<- error,
+    stack *node.Node,
 ) (*BlockValidator, error) {
+    remoteBlockValidatorServerUrl := config().RemoteBlockValidatorServer.URL
+    var remoteBlockValidatorClient *rpcclient.RpcClient
+    if len(remoteBlockValidatorServerUrl) > 0 && remoteBlockValidatorServerUrl != "self" && remoteBlockValidatorServerUrl != "self-auth" {
+        confFetcher := func() *rpcclient.ClientConfig { return &config().RemoteBlockValidatorServer }
+        remoteBlockValidatorClient = rpcclient.NewRpcClient(confFetcher, stack)
+        err := remoteBlockValidatorClient.Start(ctx)
+        if err != nil {
+            return nil, err
+        }
+    }
     ret := &BlockValidator{
-        StatelessBlockValidator: statelessBlockValidator,
-        createNodesChan:         make(chan struct{}, 1),
-        sendRecordChan:          make(chan struct{}, 1),
-        progressValidationsChan: make(chan struct{}, 1),
-        config:                  config,
-        fatalErr:                fatalErr,
-        prevBatchCache:          make(map[uint64][]byte),
+        StatelessBlockValidator:    statelessBlockValidator,
+        createNodesChan:            make(chan struct{}, 1),
+        sendRecordChan:             make(chan struct{}, 1),
+        progressValidationsChan:    make(chan struct{}, 1),
+        config:                     config,
+        fatalErr:                   fatalErr,
+        prevBatchCache:             make(map[uint64][]byte),
+        remoteBlockValidatorClient: remoteBlockValidatorClient,
     }
     valInputsWriter, err := inputs.NewWriter(
         inputs.WithBaseDir(ret.stack.InstanceDir()),
@@ -296,7 +315,7 @@ func NewBlockValidator(
     }
     ret.validationInputsWriter = valInputsWriter
     if !config().Dangerous.ResetBlockValidation {
-        validated, err := ret.ReadLastValidatedInfo()
+        validated, err := ret.ReadLastValidatedInfo(ctx)
         if err != nil {
             return nil, err
         }
@@ -421,8 +440,15 @@ func ReadLastValidatedInfo(db ethdb.Database) (*GlobalStateValidatedInfo, error)
     return &validated, nil
 }
 
-func (v *BlockValidator) ReadLastValidatedInfo() (*GlobalStateValidatedInfo, error) {
-    return ReadLastValidatedInfo(v.db)
+func (v *BlockValidator) ReadLastValidatedInfo(ctx context.Context) (*GlobalStateValidatedInfo, error) {
+    if v.remoteBlockValidatorClient == nil {
+        return ReadLastValidatedInfo(v.db)
+    }
+    var res GlobalStateValidatedInfo
+    if err := v.remoteBlockValidatorClient.CallContext(ctx, &res, "arb_latestValidated"); err != nil {
+        return nil, err
+    }
+    return &res, nil
 }
 
 func (v *BlockValidator) legacyReadLastValidatedInfo() (*legacyLastBlockValidatedDbInfo, error) {
@@ -763,7 +789,7 @@ func (v *BlockValidator) iterativeValidationEntryRecorder(ctx context.Context, i
 }
 
 func (v *BlockValidator) iterativeValidationPrint(ctx context.Context) time.Duration {
-    validated, err := v.ReadLastValidatedInfo()
+    validated, err := v.ReadLastValidatedInfo(ctx)
     if err != nil {
         log.Error("cannot read last validated data from database", "err", err)
         return time.Second * 30
diff --git a/staker/l1_validator.go b/staker/l1_validator.go
index 5b0c211324..721e00f752 100644
--- a/staker/l1_validator.go
+++ b/staker/l1_validator.go
@@ -291,7 +291,7 @@ func (v *L1Validator) generateNodeAction(
     var validatedCount arbutil.MessageIndex
     var validatedGlobalState validator.GoGlobalState
     if v.blockValidator != nil {
-        valInfo, err := v.blockValidator.ReadLastValidatedInfo()
+        valInfo, err := v.blockValidator.ReadLastValidatedInfo(ctx)
         if err != nil || valInfo == nil {
             return nil, false, err
         }
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go
index 67ce260529..5c12d6b0f3 100644
--- a/system_tests/staker_test.go
+++ b/system_tests/staker_test.go
@@ -11,6 +11,8 @@ import (
     "context"
     "errors"
     "fmt"
+    "github.com/ethereum/go-ethereum/node"
+    "github.com/offchainlabs/nitro/util/rpcclient"
     "math/big"
     "strings"
     "testing"
@@ -472,6 +474,392 @@ func stakerTestImpl(t *testing.T, faultyStaker bool, honestStakerInactive bool)
     }
 }
 
+func TestRemoteValidator(t *testing.T) {
+    logHandler := testhelpers.InitTestLog(t, log.LvlTrace)
+
+    ctx, cancelCtx := context.WithCancel(context.Background())
+    defer cancelCtx()
+    srv := externalsignertest.NewServer(t)
+    go func() {
+        if err := srv.Start(); err != nil {
+            log.Error("Failed to start external signer server:", err)
+            return
+        }
+    }()
+    var transferGas = util.NormalizeL2GasForL1GasInitial(800_000, params.GWei) // include room for aggregator L1 costs
+
+    builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
+    builder.nodeConfig.Staker.Enable = true
+    builder.nodeConfig.BlockValidator.Enable = true
+    builder.l2StackConfig.HTTPHost = node.DefaultHTTPHost
+    builder.l2StackConfig.HTTPPort = node.DefaultHTTPPort
+    builder.l2StackConfig.HTTPModules = append(builder.l2StackConfig.HTTPModules, "arb")
+    builder.L2Info = NewBlockChainTestInfo(
+        t,
+        types.NewArbitrumSigner(types.NewLondonSigner(builder.chainConfig.ChainID)), big.NewInt(l2pricing.InitialBaseFeeWei*2),
+        transferGas,
+    )
+
+    // For now validation only works with HashScheme set
+    builder.execConfig.Caching.StateScheme = rawdb.HashScheme
+
+    builder.nodeConfig.BatchPoster.MaxDelay = -1000 * time.Hour
+    cleanupA := builder.Build(t)
+    defer cleanupA()
+
+    addNewBatchPoster(ctx, t, builder, srv.Address)
+
+    builder.L1.SendWaitTestTransactions(t, []*types.Transaction{
+        builder.L1Info.PrepareTxTo("Faucet", &srv.Address, 30000, big.NewInt(1).Mul(big.NewInt(1e18), big.NewInt(1e18)), nil)})
+
+    l2nodeA := builder.L2.ConsensusNode
+    execNodeA := builder.L2.ExecNode
+
+    builder.L2Info.GenerateGenesisAccount("FaultyAddr", common.Big1)
+
+    config := arbnode.ConfigDefaultL1Test()
+    config.Sequencer = false
+    config.DelayedSequencer.Enable = false
+    config.BatchPoster.Enable = false
+    builder.execConfig.Sequencer.Enable = false
+    builder.l2StackConfig.HTTPHost = ""
+    builder.l2StackConfig.HTTPPort = 0
+    testClientB, cleanupB := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: config})
+    defer cleanupB()
+
+    l2nodeB := testClientB.ConsensusNode
+    execNodeB := testClientB.ExecNode
+
+    nodeAGenesis := execNodeA.Backend.APIBackend().CurrentHeader().Hash()
+    nodeBGenesis := execNodeB.Backend.APIBackend().CurrentHeader().Hash()
+    if nodeAGenesis == nodeBGenesis {
+        Fatal(t, "node A L2 genesis hash", nodeAGenesis, "== node B L2 genesis hash", nodeBGenesis)
+    }
+
+    builder.BridgeBalance(t, "Faucet", big.NewInt(1).Mul(big.NewInt(params.Ether), big.NewInt(10000)))
+
+    deployAuth := builder.L1Info.GetDefaultTransactOpts("RollupOwner", ctx)
+
+    balance := big.NewInt(params.Ether)
+    balance.Mul(balance, big.NewInt(100))
builder.L1Info.GenerateAccount("ValidatorA") + builder.L1.TransferBalance(t, "Faucet", "ValidatorA", balance, builder.L1Info) + l1authA := builder.L1Info.GetDefaultTransactOpts("ValidatorA", ctx) + + builder.L1Info.GenerateAccount("ValidatorB") + builder.L1.TransferBalance(t, "Faucet", "ValidatorB", balance, builder.L1Info) + l1authB := builder.L1Info.GetDefaultTransactOpts("ValidatorB", ctx) + + rollup, err := rollupgen.NewRollupAdminLogic(l2nodeA.DeployInfo.Rollup, builder.L1.Client) + Require(t, err) + + upgradeExecutor, err := upgrade_executorgen.NewUpgradeExecutor(l2nodeA.DeployInfo.UpgradeExecutor, builder.L1.Client) + Require(t, err, "unable to bind upgrade executor") + rollupABI, err := abi.JSON(strings.NewReader(rollupgen.RollupAdminLogicABI)) + Require(t, err, "unable to parse rollup ABI") + + setMinAssertPeriodCalldata, err := rollupABI.Pack("setMinimumAssertionPeriod", big.NewInt(1)) + Require(t, err, "unable to generate setMinimumAssertionPeriod calldata") + tx, err := upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setMinAssertPeriodCalldata) + Require(t, err, "unable to set minimum assertion period") + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) + + validatorUtils, err := rollupgen.NewValidatorUtils(l2nodeA.DeployInfo.ValidatorUtils, builder.L1.Client) + Require(t, err) + + valConfigA := staker.TestL1ValidatorConfig + parentChainID, err := builder.L1.Client.ChainID(ctx) + if err != nil { + t.Fatalf("Failed to get parent chain id: %v", err) + } + dpA, err := arbnode.StakerDataposter( + ctx, + rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix), + l2nodeA.L1Reader, + &l1authA, NewFetcherFromConfig(arbnode.ConfigDefaultL1NonSequencerTest()), + nil, + parentChainID, + ) + if err != nil { + t.Fatalf("Error creating validator dataposter: %v", err) + } + valWalletA, err := validatorwallet.NewContract(dpA, nil, l2nodeA.DeployInfo.ValidatorWalletCreator, l2nodeA.DeployInfo.Rollup, l2nodeA.L1Reader, &l1authA, 0, func(common.Address) {}, func() uint64 { return valConfigA.ExtraGas }) + Require(t, err) + valConfigA.Strategy = "MakeNodes" + + valWalletAddrAPtr, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, l2nodeA.L1Reader, true, valWalletA.DataPoster(), valWalletA.GetExtraGas()) + Require(t, err) + valWalletAddrA := *valWalletAddrAPtr + valWalletAddrCheck, err := validatorwallet.GetValidatorWalletContract(ctx, l2nodeA.DeployInfo.ValidatorWalletCreator, 0, l2nodeA.L1Reader, true, valWalletA.DataPoster(), valWalletA.GetExtraGas()) + Require(t, err) + if valWalletAddrA == *valWalletAddrCheck { + Require(t, err, "didn't cache validator wallet address", valWalletAddrA.String(), "vs", valWalletAddrCheck.String()) + } + + setValidatorCalldata, err := rollupABI.Pack("setValidator", []common.Address{valWalletAddrA, l1authB.From, srv.Address}, []bool{true, true, true}) + Require(t, err, "unable to generate setValidator calldata") + tx, err = upgradeExecutor.ExecuteCall(&deployAuth, l2nodeA.DeployInfo.Rollup, setValidatorCalldata) + Require(t, err, "unable to set validators") + _, err = builder.L1.EnsureTxSucceeded(tx) + Require(t, err) + + _, valStack := createTestValidationNode(t, ctx, &valnode.TestValidationConfig) + blockValidatorConfig := staker.TestBlockValidatorConfig + + statelessA, err := staker.NewStatelessBlockValidator( + l2nodeA.InboxReader, + l2nodeA.InboxTracker, + l2nodeA.TxStreamer, + execNodeA, + l2nodeA.ArbDB, + nil, + StaticFetcherFrom(t, &blockValidatorConfig), + valStack, + ) + 
+    Require(t, err)
+    err = statelessA.Start(ctx)
+    Require(t, err)
+    blockValidatorConfig.RemoteBlockValidatorServer = rpcclient.TestClientConfig
+    blockValidatorConfig.RemoteBlockValidatorServer.URL = "http://localhost:8545"
+    blockValidatorA, err := staker.NewBlockValidator(
+        ctx, statelessA, l2nodeA.InboxTracker, l2nodeA.TxStreamer, func() *staker.BlockValidatorConfig { return &blockValidatorConfig }, make(chan error, 10), valStack,
+    )
+    Require(t, err)
+    err = blockValidatorA.Start(ctx)
+    Require(t, err)
+    stakerA, err := staker.NewStaker(
+        l2nodeA.L1Reader,
+        valWalletA,
+        bind.CallOpts{},
+        func() *staker.L1ValidatorConfig { return &valConfigA },
+        blockValidatorA,
+        statelessA,
+        nil,
+        nil,
+        l2nodeA.DeployInfo.ValidatorUtils,
+        nil,
+    )
+    Require(t, err)
+    err = stakerA.Initialize(ctx)
+    if stakerA.Strategy() != staker.WatchtowerStrategy {
+        err = valWalletA.Initialize(ctx)
+        Require(t, err)
+    }
+    Require(t, err)
+    cfg := arbnode.ConfigDefaultL1NonSequencerTest()
+    signerCfg, err := externalSignerTestCfg(srv.Address, srv.URL())
+    if err != nil {
+        t.Fatalf("Error getting external signer config: %v", err)
+    }
+    cfg.Staker.DataPoster.ExternalSigner = *signerCfg
+    dpB, err := arbnode.StakerDataposter(
+        ctx,
+        rawdb.NewTable(l2nodeB.ArbDB, storage.StakerPrefix),
+        l2nodeB.L1Reader,
+        &l1authB, NewFetcherFromConfig(cfg),
+        nil,
+        parentChainID,
+    )
+    if err != nil {
+        t.Fatalf("Error creating validator dataposter: %v", err)
+    }
+    valWalletB, err := validatorwallet.NewEOA(dpB, l2nodeB.DeployInfo.Rollup, l2nodeB.L1Reader.Client(), func() uint64 { return 0 })
+    Require(t, err)
+    valConfigB := staker.TestL1ValidatorConfig
+    valConfigB.Strategy = "MakeNodes"
+    statelessB, err := staker.NewStatelessBlockValidator(
+        l2nodeB.InboxReader,
+        l2nodeB.InboxTracker,
+        l2nodeB.TxStreamer,
+        execNodeB,
+        l2nodeB.ArbDB,
+        nil,
+        StaticFetcherFrom(t, &blockValidatorConfig),
+        valStack,
+    )
+    Require(t, err)
+    err = statelessB.Start(ctx)
+    Require(t, err)
+    stakerB, err := staker.NewStaker(
+        l2nodeB.L1Reader,
+        valWalletB,
+        bind.CallOpts{},
+        func() *staker.L1ValidatorConfig { return &valConfigB },
+        nil,
+        statelessB,
+        nil,
+        nil,
+        l2nodeB.DeployInfo.ValidatorUtils,
+        nil,
+    )
+    Require(t, err)
+    err = stakerB.Initialize(ctx)
+    Require(t, err)
+    if stakerB.Strategy() != staker.WatchtowerStrategy {
+        err = valWalletB.Initialize(ctx)
+        Require(t, err)
+    }
+
+    builder.L2Info.GenerateAccount("BackgroundUser")
+    tx = builder.L2Info.PrepareTx("Faucet", "BackgroundUser", builder.L2Info.TransferGas, balance, nil)
+    err = builder.L2.Client.SendTransaction(ctx, tx)
+    Require(t, err)
+    _, err = builder.L2.EnsureTxSucceeded(tx)
+    Require(t, err)
+
+    // Continually make L2 transactions in a background thread
+    backgroundTxsCtx, cancelBackgroundTxs := context.WithCancel(ctx)
+    backgroundTxsShutdownChan := make(chan struct{})
+    defer (func() {
+        cancelBackgroundTxs()
+        <-backgroundTxsShutdownChan
+    })()
+    go (func() {
+        defer close(backgroundTxsShutdownChan)
+        err := makeBackgroundTxs(backgroundTxsCtx, builder)
+        if !errors.Is(err, context.Canceled) {
+            log.Warn("error making background txs", "err", err)
+        }
+    })()
+
+    stakerATxs := 0
+    stakerAWasStaked := false
+    stakerBTxs := 0
+    stakerBWasStaked := false
+    sawStakerZombie := false
+    challengeManagerTimedOut := false
+    for i := 0; i < 100; i++ {
+        var stakerName string
+        if i%2 == 0 {
+            stakerName = "A"
+            fmt.Printf("staker A acting:\n")
+            tx, err = stakerA.Act(ctx)
+            if tx != nil {
+                stakerATxs++
+            }
+        } else {
+            stakerName = "B"
+            fmt.Printf("staker B acting:\n")
+            tx, err = stakerB.Act(ctx)
+            if tx != nil {
+                stakerBTxs++
+            }
+        }
+
+        if err != nil && strings.Contains(err.Error(), "waiting") {
+            colors.PrintRed("retrying ", err.Error(), i)
+            time.Sleep(20 * time.Millisecond)
+            i--
+            continue
+        }
+        if err != nil && i%2 == 1 {
+            // Check if this is an expected error from the faulty staker.
+            if strings.Contains(err.Error(), "agreed with entire challenge") || strings.Contains(err.Error(), "after msg 0 expected global state") {
+                // Expected error upon realizing you're losing the challenge. Get ready for a timeout.
+                if !challengeManagerTimedOut {
+                    // Upgrade the ChallengeManager contract to an implementation which says challenges are always timed out
+
+                    mockImpl, tx, _, err := mocksgen.DeployTimedOutChallengeManager(&deployAuth, builder.L1.Client)
+                    Require(t, err)
+                    _, err = builder.L1.EnsureTxSucceeded(tx)
+                    Require(t, err)
+
+                    managerAddr := valWalletA.ChallengeManagerAddress()
+                    // 0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103
+                    proxyAdminSlot := common.BigToHash(arbmath.BigSub(crypto.Keccak256Hash([]byte("eip1967.proxy.admin")).Big(), common.Big1))
+                    proxyAdminBytes, err := builder.L1.Client.StorageAt(ctx, managerAddr, proxyAdminSlot, nil)
+                    Require(t, err)
+                    proxyAdminAddr := common.BytesToAddress(proxyAdminBytes)
+                    if proxyAdminAddr == (common.Address{}) {
+                        Fatal(t, "failed to get challenge manager proxy admin")
+                    }
+
+                    proxyAdminABI, err := abi.JSON(strings.NewReader(mocksgen.ProxyAdminForBindingABI))
+                    Require(t, err)
+                    upgradeCalldata, err := proxyAdminABI.Pack("upgrade", managerAddr, mockImpl)
+                    Require(t, err)
+                    tx, err = upgradeExecutor.ExecuteCall(&deployAuth, proxyAdminAddr, upgradeCalldata)
+                    Require(t, err)
+                    _, err = builder.L1.EnsureTxSucceeded(tx)
+                    Require(t, err)
+
+                    challengeManagerTimedOut = true
+                }
+            } else if strings.Contains(err.Error(), "insufficient funds") && sawStakerZombie {
+                // Expected error when trying to re-stake after losing initial stake.
+            } else if strings.Contains(err.Error(), "start state not in chain") && sawStakerZombie {
+                // Expected error when trying to re-stake after the challenger's nodes getting confirmed.
+            } else if strings.Contains(err.Error(), "STAKER_IS_ZOMBIE") && sawStakerZombie {
+                // Expected error when the staker is a zombie and thus can't advance its stake.
+            } else {
+                Require(t, err, "Faulty staker failed to act")
+            }
+            t.Log("got expected faulty staker error", err)
+            err = nil
+            tx = nil
+        }
+        Require(t, err, "Staker", stakerName, "failed to act")
+        if tx != nil {
+            _, err = builder.L1.EnsureTxSucceeded(tx)
+            Require(t, err, "EnsureTxSucceeded failed for staker", stakerName, "tx")
+        }
+        conflictInfo, err := validatorUtils.FindStakerConflict(&bind.CallOpts{}, l2nodeA.DeployInfo.Rollup, l1authA.From, srv.Address, big.NewInt(1024))
+        Require(t, err)
+        if staker.ConflictType(conflictInfo.Ty) == staker.CONFLICT_TYPE_FOUND {
+            cancelBackgroundTxs()
+        }
+        if !sawStakerZombie {
+            sawStakerZombie, err = rollup.IsZombie(&bind.CallOpts{}, srv.Address)
+            Require(t, err)
+        }
+        isHonestZombie, err := rollup.IsZombie(&bind.CallOpts{}, valWalletAddrA)
+        Require(t, err)
+        if isHonestZombie {
+            Fatal(t, "staker A became a zombie")
+        }
+        if !stakerAWasStaked {
+            stakerAWasStaked, err = rollup.IsStaked(&bind.CallOpts{}, valWalletAddrA)
+            Require(t, err)
+        }
+        if !stakerBWasStaked {
+            stakerBWasStaked, err = rollup.IsStaked(&bind.CallOpts{}, srv.Address)
+            Require(t, err)
+        }
+        for j := 0; j < 5; j++ {
+            builder.L1.TransferBalance(t, "Faucet", "Faucet", common.Big0, builder.L1Info)
+        }
+    }
+
+    if stakerATxs == 0 || stakerBTxs == 0 {
+        Fatal(t, "staker didn't make txs: staker A made", stakerATxs, "staker B made", stakerBTxs)
+    }
+
+    latestConfirmedNode, err := rollup.LatestConfirmed(&bind.CallOpts{})
+    Require(t, err)
+
+    if latestConfirmedNode <= 1 {
+        latestCreatedNode, err := rollup.LatestNodeCreated(&bind.CallOpts{})
+        Require(t, err)
+        Fatal(t, "latest confirmed node didn't advance:", latestConfirmedNode, latestCreatedNode)
+    }
+
+    if !sawStakerZombie {
+        Fatal(t, "staker B didn't become a zombie despite being faulty")
+    }
+
+    if !stakerAWasStaked {
+        Fatal(t, "staker A was never staked")
+    }
+    if !stakerBWasStaked {
+        Fatal(t, "staker B was never staked")
+    }
+
+    if logHandler.WasLogged("data poster expected next transaction to have nonce \\d+ but was requested to post transaction with nonce \\d+") {
+        Fatal(t, "Staker's DataPoster inferred nonce incorrectly")
+    }
+}
+
 func TestStakersCooperative(t *testing.T) {
     stakerTestImpl(t, false, false)
 }
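Usage sketch (not part of the applied patch): the new remote-block-validator-server option is ordinary rpcclient configuration, so pointing a block validator at a remote peer looks like TestRemoteValidator above; the helper function and endpoint below are illustrative placeholders, and when the URL is left empty or set to "self"/"self-auth", ReadLastValidatedInfo keeps reading the last-validated entry from the local database exactly as before this change.

    // Hypothetical configuration helper; staker.DefaultBlockValidatorConfig and
    // rpcclient.DefaultClientConfig come from the diff above, while the URL is a
    // placeholder for whatever node serves the "arb" namespace (arb_latestValidated).
    func exampleRemoteValidatorConfig() staker.BlockValidatorConfig {
        cfg := staker.DefaultBlockValidatorConfig
        cfg.Enable = true
        cfg.RemoteBlockValidatorServer = rpcclient.DefaultClientConfig
        cfg.RemoteBlockValidatorServer.URL = "http://remote-validator:8545" // placeholder endpoint
        return cfg
    }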