From 3a8a8808fa89582f78d100b927675a315495f7ae Mon Sep 17 00:00:00 2001 From: Rachit Sonthalia <54906134+rachit77@users.noreply.github.com> Date: Thu, 10 Oct 2024 17:28:29 +0530 Subject: [PATCH 1/4] test: add test for aggregator (#100) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test: mockery setuo * feat: make interface exported * test: add aggregator e2e test * test: wip * test: wip * fix: tests * fix: tests * test: wip * test: refactored * fix: Makefile * fix: lint * fix: test * fix: race condition * fix: apply feedback * fix: apply feedback * fix: apply feedback and remove cyclic dependency * fix: remove comments --------- Co-authored-by: Toni Ramírez --- .gitignore | 2 + aggregator/{ => agglayer}/agglayer_client.go | 2 +- aggregator/{ => agglayer}/agglayer_tx.go | 2 +- aggregator/aggregator.go | 36 +- aggregator/aggregator_test.go | 1573 ++++++++++++++++++ aggregator/config.go | 4 +- aggregator/interfaces.go | 60 +- aggregator/mocks/mock_StreamClient.go | 247 +++ aggregator/mocks/mock_agglayer_client.go | 79 + aggregator/mocks/mock_dbtx.go | 350 ++++ aggregator/mocks/mock_eth_tx_manager.go | 258 +++ aggregator/mocks/mock_etherman.go | 210 +++ aggregator/mocks/mock_prover.go | 271 +++ aggregator/mocks/mock_state.go | 406 +++++ aggregator/mocks/mock_synchronizer.go | 321 ++++ aggregator/profitabilitychecker.go | 10 +- test/Makefile | 16 +- 17 files changed, 3813 insertions(+), 34 deletions(-) rename aggregator/{ => agglayer}/agglayer_client.go (99%) rename aggregator/{ => agglayer}/agglayer_tx.go (98%) create mode 100644 aggregator/aggregator_test.go create mode 100644 aggregator/mocks/mock_StreamClient.go create mode 100644 aggregator/mocks/mock_agglayer_client.go create mode 100644 aggregator/mocks/mock_dbtx.go create mode 100644 aggregator/mocks/mock_eth_tx_manager.go create mode 100644 aggregator/mocks/mock_etherman.go create mode 100644 aggregator/mocks/mock_prover.go create mode 100644 aggregator/mocks/mock_state.go create mode 100644 aggregator/mocks/mock_synchronizer.go diff --git a/.gitignore b/.gitignore index fe96efc4..ce4e0058 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,8 @@ book/ index.html tmp .vscode +coverage.out +coverage.html .idea .idea/* diff --git a/aggregator/agglayer_client.go b/aggregator/agglayer/agglayer_client.go similarity index 99% rename from aggregator/agglayer_client.go rename to aggregator/agglayer/agglayer_client.go index 4726ccc1..dbe48fb2 100644 --- a/aggregator/agglayer_client.go +++ b/aggregator/agglayer/agglayer_client.go @@ -1,4 +1,4 @@ -package aggregator +package agglayer import ( "context" diff --git a/aggregator/agglayer_tx.go b/aggregator/agglayer/agglayer_tx.go similarity index 98% rename from aggregator/agglayer_tx.go rename to aggregator/agglayer/agglayer_tx.go index 30a483ae..f024f570 100644 --- a/aggregator/agglayer_tx.go +++ b/aggregator/agglayer/agglayer_tx.go @@ -1,4 +1,4 @@ -package aggregator +package agglayer import ( "crypto/ecdsa" diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 1c07d340..4d887136 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -16,6 +16,7 @@ import ( "github.com/0xPolygon/cdk-rpc/rpc" cdkTypes "github.com/0xPolygon/cdk-rpc/types" + "github.com/0xPolygon/cdk/aggregator/agglayer" ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" cdkcommon "github.com/0xPolygon/cdk/common" @@ -66,10 +67,10 @@ type Aggregator struct { cfg Config logger *log.Logger - state 
stateInterface - etherman etherman - ethTxManager *ethtxmanager.Client - streamClient *datastreamer.StreamClient + state StateInterface + etherman Etherman + ethTxManager EthTxManagerClient + streamClient StreamClient l1Syncr synchronizer.Synchronizer halted atomic.Bool @@ -97,7 +98,7 @@ type Aggregator struct { exit context.CancelFunc sequencerPrivateKey *ecdsa.PrivateKey - aggLayerClient AgglayerClientInterface + aggLayerClient agglayer.AgglayerClientInterface } // New creates a new aggregator. @@ -105,8 +106,8 @@ func New( ctx context.Context, cfg Config, logger *log.Logger, - stateInterface stateInterface, - etherman etherman) (*Aggregator, error) { + stateInterface StateInterface, + etherman Etherman) (*Aggregator, error) { var profitabilityChecker aggregatorTxProfitabilityChecker switch cfg.TxProfitabilityCheckerType { @@ -167,12 +168,12 @@ func New( } var ( - aggLayerClient AgglayerClientInterface + aggLayerClient agglayer.AgglayerClientInterface sequencerPrivateKey *ecdsa.PrivateKey ) if !cfg.SyncModeOnlyEnabled && cfg.SettlementBackend == AggLayer { - aggLayerClient = NewAggLayerClient(cfg.AggLayerURL) + aggLayerClient = agglayer.NewAggLayerClient(cfg.AggLayerURL) sequencerPrivateKey, err = newKeyFromKeystore(cfg.SequencerPrivateKey) if err != nil { @@ -921,10 +922,11 @@ func (a *Aggregator) settleWithAggLayer( inputs ethmanTypes.FinalProofInputs) bool { proofStrNo0x := strings.TrimPrefix(inputs.FinalProof.Proof, "0x") proofBytes := common.Hex2Bytes(proofStrNo0x) - tx := Tx{ + + tx := agglayer.Tx{ LastVerifiedBatch: cdkTypes.ArgUint64(proof.BatchNumber - 1), NewVerifiedBatch: cdkTypes.ArgUint64(proof.BatchNumberFinal), - ZKP: ZKP{ + ZKP: agglayer.ZKP{ NewStateRoot: common.BytesToHash(inputs.NewStateRoot), NewLocalExitRoot: common.BytesToHash(inputs.NewLocalExitRoot), Proof: cdkTypes.ArgBytes(proofBytes), @@ -1013,7 +1015,7 @@ func (a *Aggregator) handleFailureToAddVerifyBatchToBeMonitored(ctx context.Cont // buildFinalProof builds and return the final proof for an aggregated/batch proof. func (a *Aggregator) buildFinalProof( - ctx context.Context, prover proverInterface, proof *state.Proof) (*prover.FinalProof, error) { + ctx context.Context, prover ProverInterface, proof *state.Proof) (*prover.FinalProof, error) { tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), @@ -1059,7 +1061,7 @@ func (a *Aggregator) buildFinalProof( // build the final proof. If no proof is provided it looks for a previously // generated proof. If the proof is eligible, then the final proof generation // is triggered. -func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover proverInterface, proof *state.Proof) (bool, error) { +func (a *Aggregator) tryBuildFinalProof(ctx context.Context, prover ProverInterface, proof *state.Proof) (bool, error) { proverName := prover.Name() proverID := prover.ID() @@ -1245,7 +1247,7 @@ func (a *Aggregator) unlockProofsToAggregate(ctx context.Context, proof1 *state. 
} func (a *Aggregator) getAndLockProofsToAggregate( - ctx context.Context, prover proverInterface) (*state.Proof, *state.Proof, error) { + ctx context.Context, prover ProverInterface) (*state.Proof, *state.Proof, error) { tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), @@ -1293,7 +1295,7 @@ func (a *Aggregator) getAndLockProofsToAggregate( return proof1, proof2, nil } -func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover proverInterface) (bool, error) { +func (a *Aggregator) tryAggregateProofs(ctx context.Context, prover ProverInterface) (bool, error) { proverName := prover.Name() proverID := prover.ID() @@ -1458,7 +1460,7 @@ func (a *Aggregator) getVerifiedBatchAccInputHash(ctx context.Context, batchNumb } func (a *Aggregator) getAndLockBatchToProve( - ctx context.Context, prover proverInterface, + ctx context.Context, prover ProverInterface, ) (*state.Batch, []byte, *state.Proof, error) { proverID := prover.ID() proverName := prover.Name() @@ -1574,7 +1576,7 @@ func (a *Aggregator) getAndLockBatchToProve( return &dbBatch.Batch, dbBatch.Witness, proof, nil } -func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover proverInterface) (bool, error) { +func (a *Aggregator) tryGenerateBatchProof(ctx context.Context, prover ProverInterface) (bool, error) { tmpLogger := a.logger.WithFields( "prover", prover.Name(), "proverId", prover.ID(), diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go new file mode 100644 index 00000000..f906ebbb --- /dev/null +++ b/aggregator/aggregator_test.go @@ -0,0 +1,1573 @@ +package aggregator + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "math/big" + "sync" + "sync/atomic" + "testing" + "time" + + mocks "github.com/0xPolygon/cdk/aggregator/mocks" + "github.com/0xPolygon/cdk/aggregator/prover" + "github.com/0xPolygon/cdk/config/types" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/state" + "github.com/0xPolygon/cdk/state/datastream" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" + "github.com/ethereum/go-ethereum/common" + ethTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" +) + +var ( + proofID = "proofId" + proof = "proof" + proverName = "proverName" + proverID = "proverID" +) + +type mox struct { + stateMock *mocks.StateInterfaceMock + ethTxManager *mocks.EthTxManagerClientMock + etherman *mocks.EthermanMock + proverMock *mocks.ProverInterfaceMock + aggLayerClientMock *mocks.AgglayerClientInterfaceMock +} + +func WaitUntil(t *testing.T, wg *sync.WaitGroup, timeout time.Duration) { + t.Helper() + + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-done: + case <-time.After(timeout): + t.Fatalf("WaitGroup not done, test time expired after %s", timeout) + } +} + +func Test_resetCurrentBatchData(t *testing.T) { + t.Parallel() + + a := Aggregator{ + currentBatchStreamData: []byte("test"), + currentStreamBatchRaw: state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + BlockNumber: 1, + ChangeL2BlockHeader: state.ChangeL2BlockHeader{}, + Transactions: []state.L2TxRaw{}, + }, + }, + }, + currentStreamL2Block: state.L2BlockRaw{}, + } + + a.resetCurrentBatchData() + + assert.Equal(t, []byte{}, 
a.currentBatchStreamData) + assert.Equal(t, state.BatchRawV2{Blocks: make([]state.L2BlockRaw, 0)}, a.currentStreamBatchRaw) + assert.Equal(t, state.L2BlockRaw{}, a.currentStreamL2Block) +} + +func Test_handleReorg(t *testing.T) { + t.Parallel() + + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + mockState := new(mocks.StateInterfaceMock) + reorgData := synchronizer.ReorgExecutionResult{} + + a := &Aggregator{ + l1Syncr: mockL1Syncr, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + ctx: context.Background(), + } + + mockL1Syncr.On("GetLastestVirtualBatchNumber", mock.Anything).Return(uint64(100), nil).Once() + mockState.On("DeleteBatchesNewerThanBatchNumber", mock.Anything, uint64(100), mock.Anything).Return(nil).Once() + + go a.handleReorg(reorgData) + time.Sleep(3 * time.Second) + + assert.True(t, a.halted.Load()) + mockState.AssertExpectations(t) + mockL1Syncr.AssertExpectations(t) +} + +func Test_handleRollbackBatches(t *testing.T) { + t.Parallel() + + mockStreamClient := new(mocks.StreamClientMock) + mockEtherman := new(mocks.EthermanMock) + mockState := new(mocks.StateInterfaceMock) + + // Test data + rollbackData := synchronizer.RollbackBatchesData{ + LastBatchNumber: 100, + } + + mockStreamClient.On("IsStarted").Return(true).Once() + mockStreamClient.On("ResetProcessEntryFunc").Return().Once() + mockStreamClient.On("SetProcessEntryFunc", mock.Anything).Return().Once() + mockStreamClient.On("ExecCommandStop").Return(nil).Once() + mockStreamClient.On("Start").Return(nil).Once() + mockStreamClient.On("ExecCommandStartBookmark", mock.Anything).Return(nil).Once() + mockEtherman.On("GetLatestVerifiedBatchNum").Return(uint64(90), nil).Once() + mockState.On("DeleteBatchesNewerThanBatchNumber", mock.Anything, rollbackData.LastBatchNumber, nil).Return(nil).Once() + mockState.On("DeleteBatchesOlderThanBatchNumber", mock.Anything, rollbackData.LastBatchNumber, nil).Return(nil).Once() + mockState.On("DeleteUngeneratedProofs", mock.Anything, nil).Return(nil).Once() + mockState.On("DeleteGeneratedProofs", mock.Anything, rollbackData.LastBatchNumber+1, mock.AnythingOfType("uint64"), nil).Return(nil).Once() + + a := Aggregator{ + ctx: context.Background(), + streamClient: mockStreamClient, + etherman: mockEtherman, + state: mockState, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + streamClientMutex: &sync.Mutex{}, + currentBatchStreamData: []byte{}, + currentStreamBatchRaw: state.BatchRawV2{}, + currentStreamL2Block: state.L2BlockRaw{}, + } + + a.halted.Store(false) + a.handleRollbackBatches(rollbackData) + + assert.False(t, a.halted.Load()) + mockStreamClient.AssertExpectations(t) + mockEtherman.AssertExpectations(t) + mockState.AssertExpectations(t) +} + +func Test_handleReceivedDataStream_BatchStart(t *testing.T) { + t.Parallel() + + mockState := new(mocks.StateInterfaceMock) + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + agg := Aggregator{ + state: mockState, + l1Syncr: mockL1Syncr, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + currentStreamBatch: state.Batch{}, + } + + // Prepare a FileEntry for Batch Start + batchStartData, err := proto.Marshal(&datastream.BatchStart{ + Number: 1, + ChainId: 2, + ForkId: 3, + Type: datastream.BatchType_BATCH_TYPE_REGULAR, + }) + assert.NoError(t, err) + + batchStartEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_START), + Data: batchStartData, + } + + // Test the handleReceivedDataStream for Batch Start + err = 
agg.handleReceivedDataStream(batchStartEntry, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, agg.currentStreamBatch.BatchNumber, uint64(1)) + assert.Equal(t, agg.currentStreamBatch.ChainID, uint64(2)) + assert.Equal(t, agg.currentStreamBatch.ForkID, uint64(3)) + assert.Equal(t, agg.currentStreamBatch.Type, datastream.BatchType_BATCH_TYPE_REGULAR) +} + +func Test_handleReceivedDataStream_BatchEnd(t *testing.T) { + t.Parallel() + + mockState := new(mocks.StateInterfaceMock) + mockL1Syncr := new(mocks.SynchronizerInterfaceMock) + a := Aggregator{ + state: mockState, + l1Syncr: mockL1Syncr, + logger: log.GetDefaultLogger(), + halted: atomic.Bool{}, + currentStreamBatch: state.Batch{ + BatchNumber: uint64(2), + Type: datastream.BatchType_BATCH_TYPE_REGULAR, + Coinbase: common.Address{}, + }, + currentStreamL2Block: state.L2BlockRaw{ + BlockNumber: uint64(10), + }, + currentStreamBatchRaw: state.BatchRawV2{ + Blocks: []state.L2BlockRaw{ + { + BlockNumber: uint64(9), + ChangeL2BlockHeader: state.ChangeL2BlockHeader{}, + Transactions: []state.L2TxRaw{}, + }, + }, + }, + cfg: Config{ + UseL1BatchData: false, + }, + } + + batchEndData, err := proto.Marshal(&datastream.BatchEnd{ + Number: 1, + LocalExitRoot: []byte{1, 2, 3}, + StateRoot: []byte{4, 5, 6}, + Debug: nil, + }) + assert.NoError(t, err) + + batchEndEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_BATCH_END), + Data: batchEndData, + } + + mockState.On("GetBatch", mock.Anything, a.currentStreamBatch.BatchNumber-1, nil). + Return(&state.DBBatch{ + Batch: state.Batch{ + AccInputHash: common.Hash{}, + }, + }, nil).Once() + mockState.On("GetBatch", mock.Anything, a.currentStreamBatch.BatchNumber, nil). + Return(&state.DBBatch{ + Witness: []byte("test_witness"), + }, nil).Once() + mockState.On("AddBatch", mock.Anything, mock.Anything, nil).Return(nil).Once() + mockL1Syncr.On("GetVirtualBatchByBatchNumber", mock.Anything, a.currentStreamBatch.BatchNumber). + Return(&synchronizer.VirtualBatch{BatchL2Data: []byte{1, 2, 3}}, nil).Once() + mockL1Syncr.On("GetSequenceByBatchNumber", mock.Anything, a.currentStreamBatch.BatchNumber). 
+ Return(&synchronizer.SequencedBatches{ + L1InfoRoot: common.Hash{}, + Timestamp: time.Now(), + }, nil).Once() + + err = a.handleReceivedDataStream(batchEndEntry, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, a.currentBatchStreamData, []byte{}) + assert.Equal(t, a.currentStreamBatchRaw, state.BatchRawV2{Blocks: make([]state.L2BlockRaw, 0)}) + assert.Equal(t, a.currentStreamL2Block, state.L2BlockRaw{}) + + mockState.AssertExpectations(t) + mockL1Syncr.AssertExpectations(t) +} + +func Test_handleReceivedDataStream_L2Block(t *testing.T) { + t.Parallel() + + a := Aggregator{ + currentStreamL2Block: state.L2BlockRaw{ + BlockNumber: uint64(9), + }, + currentStreamBatchRaw: state.BatchRawV2{ + Blocks: []state.L2BlockRaw{}, + }, + currentStreamBatch: state.Batch{}, + } + + // Mock data for L2Block + l2Block := &datastream.L2Block{ + Number: uint64(10), + DeltaTimestamp: uint32(5), + L1InfotreeIndex: uint32(1), + Coinbase: []byte{0x01}, + GlobalExitRoot: []byte{0x02}, + } + + l2BlockData, err := proto.Marshal(l2Block) + assert.NoError(t, err) + + l2BlockEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_L2_BLOCK), + Data: l2BlockData, + } + + err = a.handleReceivedDataStream(l2BlockEntry, nil, nil) + assert.NoError(t, err) + + assert.Equal(t, uint64(10), a.currentStreamL2Block.BlockNumber) + assert.Equal(t, uint32(5), a.currentStreamL2Block.ChangeL2BlockHeader.DeltaTimestamp) + assert.Equal(t, uint32(1), a.currentStreamL2Block.ChangeL2BlockHeader.IndexL1InfoTree) + assert.Equal(t, 0, len(a.currentStreamL2Block.Transactions)) + assert.Equal(t, uint32(1), a.currentStreamBatch.L1InfoTreeIndex) + assert.Equal(t, common.BytesToAddress([]byte{0x01}), a.currentStreamBatch.Coinbase) + assert.Equal(t, common.BytesToHash([]byte{0x02}), a.currentStreamBatch.GlobalExitRoot) +} + +func Test_handleReceivedDataStream_Transaction(t *testing.T) { + t.Parallel() + + a := Aggregator{ + currentStreamL2Block: state.L2BlockRaw{ + Transactions: []state.L2TxRaw{}, + }, + logger: log.GetDefaultLogger(), + } + + tx := ethTypes.NewTransaction( + 0, + common.HexToAddress("0x01"), + big.NewInt(1000000000000000000), + uint64(21000), + big.NewInt(20000000000), + nil, + ) + + // Encode transaction into RLP format + var buf bytes.Buffer + err := tx.EncodeRLP(&buf) + require.NoError(t, err, "Failed to encode transaction") + + transaction := &datastream.Transaction{ + L2BlockNumber: uint64(10), + Index: uint64(0), + IsValid: true, + Encoded: buf.Bytes(), + EffectiveGasPricePercentage: uint32(90), + } + + transactionData, err := proto.Marshal(transaction) + assert.NoError(t, err) + + transactionEntry := &datastreamer.FileEntry{ + Type: datastreamer.EntryType(datastream.EntryType_ENTRY_TYPE_TRANSACTION), + Data: transactionData, + } + + err = a.handleReceivedDataStream(transactionEntry, nil, nil) + assert.NoError(t, err) + + assert.Len(t, a.currentStreamL2Block.Transactions, 1) + assert.Equal(t, uint8(90), a.currentStreamL2Block.Transactions[0].EfficiencyPercentage) + assert.False(t, a.currentStreamL2Block.Transactions[0].TxAlreadyEncoded) + assert.NotNil(t, a.currentStreamL2Block.Transactions[0].Tx) +} + +func Test_sendFinalProofSuccess(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + batchNum := uint64(23) + batchNumFinal := uint64(42) + + recursiveProof := &state.Proof{ + Prover: &proverName, + ProverID: &proverID, + ProofID: &proofID, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + finalProof := &prover.FinalProof{} + + testCases := 
[]struct { + name string + setup func(m mox, a *Aggregator) + asserts func(a *Aggregator) + }{ + { + name: "Successfully settled on Agglayer", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: AggLayer, + AggLayerTxTimeout: types.Duration{Duration: time.Millisecond * 1}, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("GetRollupId").Return(uint32(1)).Once() + testHash := common.BytesToHash([]byte("test hash")) + m.aggLayerClientMock.On("SendTx", mock.Anything).Return(testHash, nil).Once() + m.aggLayerClientMock.On("WaitTxToBeMined", testHash, mock.Anything).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Successfully settled on L1 (Direct)", + setup: func(m mox, a *Aggregator) { + senderAddr := common.BytesToAddress([]byte("sender address")).Hex() + toAddr := common.BytesToAddress([]byte("to address")) + data := []byte("data") + cfg := Config{ + SettlementBackend: L1, + SenderAddress: senderAddr, + GasOffset: uint64(10), + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, common.HexToAddress(senderAddr)).Return(&toAddr, data, nil).Once() + m.ethTxManager.On("Add", mock.Anything, &toAddr, big.NewInt(0), data, a.cfg.GasOffset, (*ethTypes.BlobTxSidecar)(nil)).Return(nil, nil).Once() + m.ethTxManager.On("ProcessPendingMonitoredTxs", mock.Anything, mock.Anything).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + + curve := elliptic.P256() + privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) + require.NoError(err, "error generating key") + + a := Aggregator{ + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + aggLayerClient: aggLayerClient, + finalProof: make(chan finalProofMsg), + logger: log.GetDefaultLogger(), + verifyingProof: false, + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + sequencerPrivateKey: privateKey, + } + a.ctx, a.exit = context.WithCancel(context.Background()) + + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + aggLayerClientMock: aggLayerClient, + } + if tc.setup != nil { + tc.setup(m, &a) + } + // send a final proof over the channel + go func() { + finalMsg := finalProofMsg{ + proverID: proverID, + recursiveProof: recursiveProof, + finalProof: finalProof, + } + a.finalProof <- finalMsg + time.Sleep(1 * time.Second) + a.exit() + }() + + a.sendFinalProof() + if tc.asserts != nil { + tc.asserts(&a) + } + }) + } +} + +func Test_sendFinalProofError(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + errTest := errors.New("test error") + batchNum := uint64(23) + batchNumFinal := uint64(42) + sender := common.BytesToAddress([]byte("SenderAddress")) + 
senderAddr := sender.Hex() + + recursiveProof := &state.Proof{ + Prover: &proverName, + ProverID: &proverID, + ProofID: &proofID, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + finalProof := &prover.FinalProof{} + + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(*Aggregator) + }{ + { + name: "Failed to settle on Agglayer: GetBatch error", + setup: func(m mox, a *Aggregator) { + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + // test is done, stop the sendFinalProof method + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, errTest).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on Agglayer: SendTx error", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: AggLayer, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("GetRollupId").Return(uint32(1)).Once() + m.aggLayerClientMock.On("SendTx", mock.Anything).Run(func(args mock.Arguments) { + // test is done, stop the sendFinalProof method + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on Agglayer: WaitTxToBeMined error", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: AggLayer, + AggLayerTxTimeout: types.Duration{Duration: time.Millisecond * 1}, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("GetRollupId").Return(uint32(1)).Once() + m.aggLayerClientMock.On("SendTx", mock.Anything).Return(common.Hash{}, nil).Once() + m.aggLayerClientMock.On("WaitTxToBeMined", mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(errTest) + m.stateMock.On("UpdateGeneratedProof", mock.Anything, mock.Anything, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on L1 (Direct): BuildTrustedVerifyBatchesTxData error", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: L1, + SenderAddress: senderAddr, + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, sender).Run(func(args mock.Arguments) { + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, nil, errTest) + m.stateMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + { + name: "Failed to settle on L1 (Direct): Error Adding TX to ethTxManager", + setup: func(m mox, a *Aggregator) { + cfg := Config{ + SettlementBackend: 
L1, + SenderAddress: senderAddr, + GasOffset: uint64(10), + } + a.cfg = cfg + + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Run(func(args mock.Arguments) { + }).Return(&state.DBBatch{ + Batch: state.Batch{ + LocalExitRoot: common.Hash{}, + StateRoot: common.Hash{}, + }, + }, nil).Once() + + m.etherman.On("BuildTrustedVerifyBatchesTxData", batchNum-1, batchNumFinal, mock.Anything, sender).Return(nil, nil, nil).Once() + m.ethTxManager.On("Add", mock.Anything, mock.Anything, big.NewInt(0), mock.Anything, a.cfg.GasOffset, (*ethTypes.BlobTxSidecar)(nil)).Run(func(args mock.Arguments) { + fmt.Println("Stopping sendFinalProof") + a.exit() + }).Return(nil, errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.Anything, recursiveProof, nil).Return(nil).Once() + }, + asserts: func(a *Aggregator) { + assert.False(a.verifyingProof) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + aggLayerClient := mocks.NewAgglayerClientInterfaceMock(t) + + curve := elliptic.P256() + privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) + require.NoError(err, "error generating key") + + a := Aggregator{ + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + aggLayerClient: aggLayerClient, + finalProof: make(chan finalProofMsg), + logger: log.GetDefaultLogger(), + verifyingProof: false, + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + sequencerPrivateKey: privateKey, + } + a.ctx, a.exit = context.WithCancel(context.Background()) + + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + aggLayerClientMock: aggLayerClient, + } + if tc.setup != nil { + tc.setup(m, &a) + } + // send a final proof over the channel + go func() { + finalMsg := finalProofMsg{ + proverID: proverID, + recursiveProof: recursiveProof, + finalProof: finalProof, + } + a.finalProof <- finalMsg + }() + + a.sendFinalProof() + if tc.asserts != nil { + tc.asserts(&a) + } + }) + } +} + +func Test_buildFinalProof(t *testing.T) { + assert := assert.New(t) + batchNum := uint64(23) + batchNumFinal := uint64(42) + recursiveProof := &state.Proof{ + ProverID: &proverID, + Proof: "test proof", + ProofID: &proofID, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + finalProofID := "finalProofID" + + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(err error, fProof *prover.FinalProof) + }{ + { + name: "using real prover", + setup: func(m mox, a *Aggregator) { + finalProof := prover.FinalProof{ + Public: &prover.PublicInputsExtended{ + NewStateRoot: []byte("StateRoot"), + NewLocalExitRoot: []byte("LocalExitRoot"), + }, + } + + m.proverMock.On("Name").Return("name").Once() + m.proverMock.On("ID").Return("id").Once() + m.proverMock.On("Addr").Return("addr").Once() + m.proverMock.On("FinalProof", recursiveProof.Proof, a.cfg.SenderAddress).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.Anything, finalProofID).Return(&finalProof, nil).Once() + }, + asserts: func(err error, fProof *prover.FinalProof) { + assert.NoError(err) + assert.True(bytes.Equal([]byte("StateRoot"), fProof.Public.NewStateRoot), "State roots should be equal") + assert.True(bytes.Equal([]byte("LocalExitRoot"), fProof.Public.NewLocalExitRoot), "LocalExit roots should be equal") + }, + }, + { + name: "using mock prover", + setup: 
func(m mox, a *Aggregator) { + finalProof := prover.FinalProof{ + Public: &prover.PublicInputsExtended{ + NewStateRoot: []byte(mockedStateRoot), + NewLocalExitRoot: []byte(mockedLocalExitRoot), + }, + } + + finalDBBatch := &state.DBBatch{ + Batch: state.Batch{ + StateRoot: common.BytesToHash([]byte("mock StateRoot")), + LocalExitRoot: common.BytesToHash([]byte("mock LocalExitRoot")), + }, + } + + m.proverMock.On("Name").Return("name").Once() + m.proverMock.On("ID").Return("id").Once() + m.proverMock.On("Addr").Return("addr").Once() + m.proverMock.On("FinalProof", recursiveProof.Proof, a.cfg.SenderAddress).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.Anything, finalProofID).Return(&finalProof, nil).Once() + m.stateMock.On("GetBatch", mock.Anything, batchNumFinal, nil).Return(finalDBBatch, nil).Once() + }, + asserts: func(err error, fProof *prover.FinalProof) { + assert.NoError(err) + expStateRoot := common.BytesToHash([]byte("mock StateRoot")) + expLocalExitRoot := common.BytesToHash([]byte("mock LocalExitRoot")) + assert.True(bytes.Equal(expStateRoot.Bytes(), fProof.Public.NewStateRoot), "State roots should be equal") + assert.True(bytes.Equal(expLocalExitRoot.Bytes(), fProof.Public.NewLocalExitRoot), "LocalExit roots should be equal") + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + proverMock := mocks.NewProverInterfaceMock(t) + stateMock := mocks.NewStateInterfaceMock(t) + m := mox{ + proverMock: proverMock, + stateMock: stateMock, + } + a := Aggregator{ + state: stateMock, + logger: log.GetDefaultLogger(), + cfg: Config{ + SenderAddress: common.BytesToAddress([]byte("from")).Hex(), + }, + } + + tc.setup(m, &a) + fProof, err := a.buildFinalProof(context.Background(), proverMock, recursiveProof) + tc.asserts(err, fProof) + }) + } +} + +func Test_tryBuildFinalProof(t *testing.T) { + assert := assert.New(t) + errTest := errors.New("test error") + from := common.BytesToAddress([]byte("from")) + cfg := Config{ + VerifyProofInterval: types.Duration{Duration: time.Millisecond * 1}, + TxProfitabilityCheckerType: ProfitabilityAcceptAll, + SenderAddress: from.Hex(), + } + latestVerifiedBatchNum := uint64(22) + batchNum := uint64(23) + batchNumFinal := uint64(42) + finalProofID := "finalProofID" + finalProof := prover.FinalProof{ + Proof: "", + Public: &prover.PublicInputsExtended{ + NewStateRoot: []byte("newStateRoot"), + NewLocalExitRoot: []byte("newLocalExitRoot"), + }, + } + proofToVerify := state.Proof{ + ProofID: &proofID, + Proof: proof, + BatchNumber: batchNum, + BatchNumberFinal: batchNumFinal, + } + invalidProof := state.Proof{ + ProofID: &proofID, + Proof: proof, + BatchNumber: uint64(123), + BatchNumberFinal: uint64(456), + } + + proverCtx := context.WithValue(context.Background(), "owner", "prover") //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "prover" } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "aggregator" } + testCases := []struct { + name string + proof *state.Proof + setup func(mox, *Aggregator) + asserts func(bool, *Aggregator, error) + assertFinalMsg func(*finalProofMsg) + }{ + { + name: "can't verify proof (verifyingProof = true)", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return("addr").Once() + a.verifyingProof = true + }, + asserts: func(result bool, a *Aggregator, err 
error) { + a.verifyingProof = false // reset + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "can't verify proof (verify time not reached yet)", + setup: func(m mox, a *Aggregator) { + a.timeSendFinalProof = time.Now().Add(10 * time.Second) + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return("addr").Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "nil proof, error requesting the proof triggers defer", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + proofGeneratingTrueCall := m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(nil, errTest).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proofToVerify, nil). + Return(nil). + Once(). + NotBefore(proofGeneratingTrueCall) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "nil proof, error building the proof triggers defer", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr").Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + proofGeneratingTrueCall := m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(nil, errTest).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proofToVerify, nil). + Return(nil). + Once(). 
+ NotBefore(proofGeneratingTrueCall) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "nil proof, generic error from GetProofReadyToVerify", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "nil proof, ErrNotFound from GetProofReadyToVerify", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(nil, state.ErrNotFound).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "nil proof gets a proof ready to verify", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return(proverID).Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("GetProofReadyToVerify", mock.MatchedBy(matchProverCtxFn), latestVerifiedBatchNum, nil).Return(&proofToVerify, nil).Once() + m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(&finalProof, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + assertFinalMsg: func(msg *finalProofMsg) { + assert.Equal(finalProof.Proof, msg.finalProof.Proof) + assert.Equal(finalProof.Public.NewStateRoot, msg.finalProof.Public.NewStateRoot) + assert.Equal(finalProof.Public.NewLocalExitRoot, msg.finalProof.Public.NewLocalExitRoot) + }, + }, + { + name: "error checking if proof is a complete sequence", + proof: &proofToVerify, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "invalid proof (not consecutive to latest verified batch) rejected", + proof: &invalidProof, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + 
m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "invalid proof (not a complete sequence) rejected", + proof: &proofToVerify, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Once() + m.proverMock.On("ID").Return(proverID).Once() + m.proverMock.On("Addr").Return(proverID).Once() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(false, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "valid proof", + proof: &proofToVerify, + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return(proverID).Twice() + m.etherman.On("GetLatestVerifiedBatchNum").Return(latestVerifiedBatchNum, nil).Once() + m.stateMock.On("CheckProofContainsCompleteSequences", mock.MatchedBy(matchProverCtxFn), &proofToVerify, nil).Return(true, nil).Once() + m.proverMock.On("FinalProof", proofToVerify.Proof, from.String()).Return(&finalProofID, nil).Once() + m.proverMock.On("WaitFinalProof", mock.MatchedBy(matchProverCtxFn), finalProofID).Return(&finalProof, nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + assertFinalMsg: func(msg *finalProofMsg) { + assert.Equal(finalProof.Proof, msg.finalProof.Proof) + assert.Equal(finalProof.Public.NewStateRoot, msg.finalProof.Public.NewStateRoot) + assert.Equal(finalProof.Public.NewLocalExitRoot, msg.finalProof.Public.NewLocalExitRoot) + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + proverMock := mocks.NewProverInterfaceMock(t) + + a := Aggregator{ + cfg: cfg, + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + logger: log.GetDefaultLogger(), + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, + finalProof: make(chan finalProofMsg), + } + + aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck + a.ctx, a.exit = context.WithCancel(aggregatorCtx) + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + proverMock: proverMock, + } + if tc.setup != nil { + tc.setup(m, &a) + } + + var wg sync.WaitGroup + if tc.assertFinalMsg != nil { + // wait for the final proof over the channel + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + msg := <-a.finalProof + tc.assertFinalMsg(&msg) + }() + } + + result, err := a.tryBuildFinalProof(proverCtx, proverMock, tc.proof) + + if tc.asserts != nil { + tc.asserts(result, &a, err) + } + + if tc.assertFinalMsg != nil { + WaitUntil(t, &wg, time.Second) + } + }) + } +} + +func Test_tryAggregateProofs(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + errTest := errors.New("test error") + cfg := Config{ + VerifyProofInterval: types.Duration{Duration: time.Millisecond * 1}, + } + + recursiveProof := "recursiveProof" + proverCtx := 
context.WithValue(context.Background(), "owner", "prover") //nolint:staticcheck + matchProverCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "prover" } + matchAggregatorCtxFn := func(ctx context.Context) bool { return ctx.Value("owner") == "aggregator" } + batchNum := uint64(23) + batchNumFinal := uint64(42) + proof1 := state.Proof{ + Proof: "proof1", + BatchNumber: batchNum, + } + proof2 := state.Proof{ + Proof: "proof2", + BatchNumberFinal: batchNumFinal, + } + testCases := []struct { + name string + setup func(mox, *Aggregator) + asserts func(bool, *Aggregator, error) + }{ + { + name: "getAndLockProofsToAggregate returns generic error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, errTest).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "getAndLockProofsToAggregate returns ErrNotFound", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(nil, nil, state.ErrNotFound).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.NoError(err) + }, + }, + { + name: "getAndLockProofsToAggregate error updating proofs", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(errTest). + Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "AggregatedProof error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). 
+ Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + // Use a type assertion with a check + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.NotNil(proofArg.GeneratingSince) + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(nil, errTest).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.Nil(proofArg.GeneratingSince) + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). + NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + if !ok { + assert.Fail("Expected argument of type *state.Proof") + } + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). 
+ Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). + NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "unlockProofsToAggregate error after WaitRecursiveProof prover error", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return(proverID) + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Once() + dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return("", common.Hash{}, errTest).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(errTest). + Once(). 
+ NotBefore(proof1GeneratingTrueCall) + dbTx.On("Rollback", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once() + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "rollback after DeleteGeneratedProofs error in db transaction", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(errTest).Once() + dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). 
+ NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "rollback after AddGeneratedProof error in db transaction", + setup: func(m mox, a *Aggregator) { + m.proverMock.On("Name").Return(proverName).Twice() + m.proverMock.On("ID").Return(proverID).Twice() + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + lockProofsTxBegin := m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + lockProofsTxCommit := dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + proof1GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + proof2GeneratingTrueCall := m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, dbTx).Return(errTest).Once() + dbTx.On("Rollback", mock.MatchedBy(matchProverCtxFn)).Return(nil).Once() + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchAggregatorCtxFn)).Return(dbTx, nil).Once().NotBefore(lockProofsTxBegin) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). + NotBefore(proof1GeneratingTrueCall) + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.Nil(proofArg.GeneratingSince, "Expected GeneratingSince to be nil") + }). + Return(nil). + Once(). 
+ NotBefore(proof2GeneratingTrueCall) + dbTx.On("Commit", mock.MatchedBy(matchAggregatorCtxFn)).Return(nil).Once().NotBefore(lockProofsTxCommit) + }, + asserts: func(result bool, a *Aggregator, err error) { + assert.False(result) + assert.ErrorIs(err, errTest) + }, + }, + { + name: "time to send final, state error", + setup: func(m mox, a *Aggregator) { + a.cfg.VerifyProofInterval = types.Duration{Duration: time.Nanosecond} + m.proverMock.On("Name").Return(proverName).Times(3) + m.proverMock.On("ID").Return(proverID).Times(3) + m.proverMock.On("Addr").Return("addr") + dbTx := &mocks.DbTxMock{} + m.stateMock.On("BeginStateTransaction", mock.MatchedBy(matchProverCtxFn)).Return(dbTx, nil).Twice() + dbTx.On("Commit", mock.MatchedBy(matchProverCtxFn)).Return(nil).Twice() + m.stateMock.On("GetProofsToAggregate", mock.MatchedBy(matchProverCtxFn), nil).Return(&proof1, &proof2, nil).Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof1, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + m.stateMock. + On("UpdateGeneratedProof", mock.MatchedBy(matchProverCtxFn), &proof2, dbTx). + Run(func(args mock.Arguments) { + proofArg, ok := args[1].(*state.Proof) + assert.True(ok, "Expected argument of type *state.Proof") + assert.NotNil(proofArg.GeneratingSince, "Expected GeneratingSince to be not nil") + }). + Return(nil). + Once() + + m.proverMock.On("AggregatedProof", proof1.Proof, proof2.Proof).Return(&proofID, nil).Once() + m.proverMock.On("WaitRecursiveProof", mock.MatchedBy(matchProverCtxFn), proofID).Return(recursiveProof, common.Hash{}, nil).Once() + m.stateMock.On("DeleteGeneratedProofs", mock.MatchedBy(matchProverCtxFn), proof1.BatchNumber, proof2.BatchNumberFinal, dbTx).Return(nil).Once() + expectedInputProver := map[string]interface{}{ + "recursive_proof_1": proof1.Proof, + "recursive_proof_2": proof2.Proof, + } + b, err := json.Marshal(expectedInputProver) + require.NoError(err) + m.stateMock.On("AddGeneratedProof", mock.MatchedBy(matchProverCtxFn), mock.Anything, dbTx).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(proof1.BatchNumber, proof.BatchNumber) + assert.Equal(proof2.BatchNumberFinal, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.Equal(string(b), proof.InputProver) + assert.Equal(recursiveProof, proof.Proof) + assert.InDelta(time.Now().Unix(), proof.GeneratingSince.Unix(), float64(time.Second)) + }, + ).Return(nil).Once() + + m.etherman.On("GetLatestVerifiedBatchNum").Return(uint64(42), errTest).Once() + m.stateMock.On("UpdateGeneratedProof", mock.MatchedBy(matchAggregatorCtxFn), mock.Anything, nil).Run( + func(args mock.Arguments) { + proof, ok := args[1].(*state.Proof) + if !ok { + t.Fatalf("expected args[1] to be of type *state.Proof, got %T", args[1]) + } + assert.Equal(proof1.BatchNumber, proof.BatchNumber) + assert.Equal(proof2.BatchNumberFinal, proof.BatchNumberFinal) + assert.Equal(&proverName, proof.Prover) + assert.Equal(&proverID, proof.ProverID) + assert.Equal(string(b), proof.InputProver) + assert.Equal(recursiveProof, proof.Proof) + assert.Nil(proof.GeneratingSince) + }, + ).Return(nil).Once() + }, + asserts: func(result bool, a 
*Aggregator, err error) { + assert.True(result) + assert.NoError(err) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + stateMock := mocks.NewStateInterfaceMock(t) + ethTxManager := mocks.NewEthTxManagerClientMock(t) + etherman := mocks.NewEthermanMock(t) + proverMock := mocks.NewProverInterfaceMock(t) + a := Aggregator{ + cfg: cfg, + state: stateMock, + etherman: etherman, + ethTxManager: ethTxManager, + logger: log.GetDefaultLogger(), + stateDBMutex: &sync.Mutex{}, + timeSendFinalProofMutex: &sync.RWMutex{}, + timeCleanupLockedProofs: cfg.CleanupLockedProofsInterval, + finalProof: make(chan finalProofMsg), + } + aggregatorCtx := context.WithValue(context.Background(), "owner", "aggregator") //nolint:staticcheck + a.ctx, a.exit = context.WithCancel(aggregatorCtx) + m := mox{ + stateMock: stateMock, + ethTxManager: ethTxManager, + etherman: etherman, + proverMock: proverMock, + } + if tc.setup != nil { + tc.setup(m, &a) + } + a.resetVerifyProofTime() + + result, err := a.tryAggregateProofs(proverCtx, proverMock) + + if tc.asserts != nil { + tc.asserts(result, &a, err) + } + }) + } +} diff --git a/aggregator/config.go b/aggregator/config.go index 89676e3d..fbbc9c9b 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -152,8 +152,8 @@ type Config struct { // MaxWitnessRetrievalWorkers is the maximum number of workers that will be used to retrieve the witness MaxWitnessRetrievalWorkers int `mapstructure:"MaxWitnessRetrievalWorkers"` - // SyncModeOnlyEnabled is a flag to enable the sync mode only - // In this mode the aggregator will only sync from L1 and will not generate or read the data stream + // SyncModeOnlyEnabled is a flag that activates sync mode exclusively. + // When enabled, the aggregator will sync data only from L1 and will not generate or read the data stream. SyncModeOnlyEnabled bool `mapstructure:"SyncModeOnlyEnabled"` } diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index 85676f69..ee70d07c 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -7,14 +7,18 @@ import ( ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" "github.com/0xPolygon/cdk/state" + "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/jackc/pgx/v4" ) // Consumer interfaces required by the package. -type proverInterface interface { +type ProverInterface interface { Name() string ID() string Addr() string @@ -26,8 +30,8 @@ type proverInterface interface { WaitFinalProof(ctx context.Context, proofID string) (*prover.FinalProof, error) } -// etherman contains the methods required to interact with ethereum -type etherman interface { +// Etherman contains the methods required to interact with ethereum +type Etherman interface { GetRollupId() uint32 GetLatestVerifiedBatchNum() (uint64, error) BuildTrustedVerifyBatchesTxData( @@ -44,8 +48,8 @@ type aggregatorTxProfitabilityChecker interface { IsProfitable(context.Context, *big.Int) (bool, error) } -// stateInterface gathers the methods to interact with the state. -type stateInterface interface { +// StateInterface gathers the methods to interact with the state. 
+type StateInterface interface { BeginStateTransaction(ctx context.Context) (pgx.Tx, error) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) @@ -63,3 +67,49 @@ type stateInterface interface { DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error } + +// StreamClient represents the stream client behaviour +type StreamClient interface { + Start() error + ExecCommandStart(fromEntry uint64) error + ExecCommandStartBookmark(fromBookmark []byte) error + ExecCommandStop() error + ExecCommandGetHeader() (datastreamer.HeaderEntry, error) + ExecCommandGetEntry(fromEntry uint64) (datastreamer.FileEntry, error) + ExecCommandGetBookmark(fromBookmark []byte) (datastreamer.FileEntry, error) + GetFromStream() uint64 + GetTotalEntries() uint64 + SetProcessEntryFunc(f datastreamer.ProcessEntryFunc) + ResetProcessEntryFunc() + IsStarted() bool +} + +// EthTxManagerClient represents the eth tx manager interface +type EthTxManagerClient interface { + Add( + ctx context.Context, + to *common.Address, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *types.BlobTxSidecar, + ) (common.Hash, error) + AddWithGas( + ctx context.Context, + to *common.Address, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *types.BlobTxSidecar, + gas uint64, + ) (common.Hash, error) + EncodeBlobData(data []byte) (kzg4844.Blob, error) + MakeBlobSidecar(blobs []kzg4844.Blob) *types.BlobTxSidecar + ProcessPendingMonitoredTxs(ctx context.Context, resultHandler ethtxmanager.ResultHandler) + Remove(ctx context.Context, id common.Hash) error + RemoveAll(ctx context.Context) error + Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) + ResultsByStatus(ctx context.Context, statuses []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) + Start() + Stop() +} diff --git a/aggregator/mocks/mock_StreamClient.go b/aggregator/mocks/mock_StreamClient.go new file mode 100644 index 00000000..7962d31e --- /dev/null +++ b/aggregator/mocks/mock_StreamClient.go @@ -0,0 +1,247 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
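With the consumer interfaces now exported (StateInterface, Etherman, EthTxManagerClient, StreamClient, ProverInterface), the generated mocks below can be checked against them at compile time. The following is an illustrative sketch only, not part of this patch: a set of compile-time assertions that could live in a test file of the aggregator package, using the mock constructors added under aggregator/mocks.

package aggregator

import "github.com/0xPolygon/cdk/aggregator/mocks"

// Illustrative sketch (not part of this patch): the build fails if a
// regenerated mock no longer satisfies the exported interface it mocks.
var (
	_ StateInterface     = (*mocks.StateInterfaceMock)(nil)
	_ Etherman           = (*mocks.EthermanMock)(nil)
	_ EthTxManagerClient = (*mocks.EthTxManagerClientMock)(nil)
	_ StreamClient       = (*mocks.StreamClientMock)(nil)
	_ ProverInterface    = (*mocks.ProverInterfaceMock)(nil)
)

Exporting the interfaces is what makes this kind of guard, and external mock generation in general, possible without a cyclic dependency.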
+ +package mocks + +import ( + datastreamer "github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer" + mock "github.com/stretchr/testify/mock" +) + +// StreamClientMock is an autogenerated mock type for the StreamClient type +type StreamClientMock struct { + mock.Mock +} + +// ExecCommandGetBookmark provides a mock function with given fields: fromBookmark +func (_m *StreamClientMock) ExecCommandGetBookmark(fromBookmark []byte) (datastreamer.FileEntry, error) { + ret := _m.Called(fromBookmark) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandGetBookmark") + } + + var r0 datastreamer.FileEntry + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (datastreamer.FileEntry, error)); ok { + return rf(fromBookmark) + } + if rf, ok := ret.Get(0).(func([]byte) datastreamer.FileEntry); ok { + r0 = rf(fromBookmark) + } else { + r0 = ret.Get(0).(datastreamer.FileEntry) + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(fromBookmark) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecCommandGetEntry provides a mock function with given fields: fromEntry +func (_m *StreamClientMock) ExecCommandGetEntry(fromEntry uint64) (datastreamer.FileEntry, error) { + ret := _m.Called(fromEntry) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandGetEntry") + } + + var r0 datastreamer.FileEntry + var r1 error + if rf, ok := ret.Get(0).(func(uint64) (datastreamer.FileEntry, error)); ok { + return rf(fromEntry) + } + if rf, ok := ret.Get(0).(func(uint64) datastreamer.FileEntry); ok { + r0 = rf(fromEntry) + } else { + r0 = ret.Get(0).(datastreamer.FileEntry) + } + + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(fromEntry) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecCommandGetHeader provides a mock function with given fields: +func (_m *StreamClientMock) ExecCommandGetHeader() (datastreamer.HeaderEntry, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ExecCommandGetHeader") + } + + var r0 datastreamer.HeaderEntry + var r1 error + if rf, ok := ret.Get(0).(func() (datastreamer.HeaderEntry, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() datastreamer.HeaderEntry); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(datastreamer.HeaderEntry) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExecCommandStart provides a mock function with given fields: fromEntry +func (_m *StreamClientMock) ExecCommandStart(fromEntry uint64) error { + ret := _m.Called(fromEntry) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandStart") + } + + var r0 error + if rf, ok := ret.Get(0).(func(uint64) error); ok { + r0 = rf(fromEntry) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExecCommandStartBookmark provides a mock function with given fields: fromBookmark +func (_m *StreamClientMock) ExecCommandStartBookmark(fromBookmark []byte) error { + ret := _m.Called(fromBookmark) + + if len(ret) == 0 { + panic("no return value specified for ExecCommandStartBookmark") + } + + var r0 error + if rf, ok := ret.Get(0).(func([]byte) error); ok { + r0 = rf(fromBookmark) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExecCommandStop provides a mock function with given fields: +func (_m *StreamClientMock) ExecCommandStop() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ExecCommandStop") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetFromStream provides a mock function with given fields: +func (_m *StreamClientMock) GetFromStream() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetFromStream") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// GetTotalEntries provides a mock function with given fields: +func (_m *StreamClientMock) GetTotalEntries() uint64 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetTotalEntries") + } + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + +// IsStarted provides a mock function with given fields: +func (_m *StreamClientMock) IsStarted() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsStarted") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// ResetProcessEntryFunc provides a mock function with given fields: +func (_m *StreamClientMock) ResetProcessEntryFunc() { + _m.Called() +} + +// SetProcessEntryFunc provides a mock function with given fields: f +func (_m *StreamClientMock) SetProcessEntryFunc(f datastreamer.ProcessEntryFunc) { + _m.Called(f) +} + +// Start provides a mock function with given fields: +func (_m *StreamClientMock) Start() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Start") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStreamClientMock creates a new instance of StreamClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStreamClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *StreamClientMock { + mock := &StreamClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_agglayer_client.go b/aggregator/mocks/mock_agglayer_client.go new file mode 100644 index 00000000..2923ebe0 --- /dev/null +++ b/aggregator/mocks/mock_agglayer_client.go @@ -0,0 +1,79 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
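The mock defined below covers the AggLayer settlement path, which needs only two calls: SendTx and WaitTxToBeMined. A rough, hypothetical usage sketch follows; the test name and expectation values are invented for illustration and are not taken from this patch.

package aggregator

import (
	"context"
	"testing"

	"github.com/0xPolygon/cdk/aggregator/agglayer"
	"github.com/0xPolygon/cdk/aggregator/mocks"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// Illustrative sketch (not part of this patch) of stubbing the AggLayer client.
func TestAggLayerSettlementSketch(t *testing.T) {
	aggLayerMock := mocks.NewAgglayerClientInterfaceMock(t)

	// Expect one SendTx, then one WaitTxToBeMined for the returned hash.
	txHash := common.HexToHash("0x01")
	aggLayerMock.On("SendTx", mock.Anything).Return(txHash, nil).Once()
	aggLayerMock.On("WaitTxToBeMined", txHash, mock.Anything).Return(nil).Once()

	hash, err := aggLayerMock.SendTx(agglayer.SignedTx{})
	require.NoError(t, err)
	require.NoError(t, aggLayerMock.WaitTxToBeMined(hash, context.Background()))
}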
+ +package mocks + +import ( + agglayer "github.com/0xPolygon/cdk/aggregator/agglayer" + common "github.com/ethereum/go-ethereum/common" + + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// AgglayerClientInterfaceMock is an autogenerated mock type for the AgglayerClientInterface type +type AgglayerClientInterfaceMock struct { + mock.Mock +} + +// SendTx provides a mock function with given fields: signedTx +func (_m *AgglayerClientInterfaceMock) SendTx(signedTx agglayer.SignedTx) (common.Hash, error) { + ret := _m.Called(signedTx) + + if len(ret) == 0 { + panic("no return value specified for SendTx") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(agglayer.SignedTx) (common.Hash, error)); ok { + return rf(signedTx) + } + if rf, ok := ret.Get(0).(func(agglayer.SignedTx) common.Hash); ok { + r0 = rf(signedTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(agglayer.SignedTx) error); ok { + r1 = rf(signedTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WaitTxToBeMined provides a mock function with given fields: hash, ctx +func (_m *AgglayerClientInterfaceMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { + ret := _m.Called(hash, ctx) + + if len(ret) == 0 { + panic("no return value specified for WaitTxToBeMined") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Hash, context.Context) error); ok { + r0 = rf(hash, ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewAgglayerClientInterfaceMock creates a new instance of AgglayerClientInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewAgglayerClientInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *AgglayerClientInterfaceMock { + mock := &AgglayerClientInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_dbtx.go b/aggregator/mocks/mock_dbtx.go new file mode 100644 index 00000000..f870cd57 --- /dev/null +++ b/aggregator/mocks/mock_dbtx.go @@ -0,0 +1,350 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
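DbTxMock below stands in for a pgx.Tx, which is what lets the tests above drive the commit and rollback branches without a database. A condensed sketch of that pattern, assuming the same testify helpers used in aggregator_test.go (the test name and flow are illustrative, not part of this patch):

package aggregator

import (
	"context"
	"testing"

	"github.com/0xPolygon/cdk/aggregator/mocks"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// Illustrative sketch (not part of this patch) of the state tx rollback pattern.
func TestStateTxRollbackSketch(t *testing.T) {
	stateMock := mocks.NewStateInterfaceMock(t)
	dbTx := mocks.NewDbTxMock(t)

	// The state hands out the mocked transaction; the code under test is
	// expected to roll it back when a later step fails.
	stateMock.On("BeginStateTransaction", mock.Anything).Return(dbTx, nil).Once()
	dbTx.On("Rollback", mock.Anything).Return(nil).Once()

	tx, err := stateMock.BeginStateTransaction(context.Background())
	require.NoError(t, err)
	require.NoError(t, tx.Rollback(context.Background()))
}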
+ +package mocks + +import ( + context "context" + + pgconn "github.com/jackc/pgconn" + mock "github.com/stretchr/testify/mock" + + pgx "github.com/jackc/pgx/v4" +) + +// DbTxMock is an autogenerated mock type for the Tx type +type DbTxMock struct { + mock.Mock +} + +// Begin provides a mock function with given fields: ctx +func (_m *DbTxMock) Begin(ctx context.Context) (pgx.Tx, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Begin") + } + + var r0 pgx.Tx + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (pgx.Tx, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) pgx.Tx); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Tx) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BeginFunc provides a mock function with given fields: ctx, f +func (_m *DbTxMock) BeginFunc(ctx context.Context, f func(pgx.Tx) error) error { + ret := _m.Called(ctx, f) + + if len(ret) == 0 { + panic("no return value specified for BeginFunc") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, func(pgx.Tx) error) error); ok { + r0 = rf(ctx, f) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Commit provides a mock function with given fields: ctx +func (_m *DbTxMock) Commit(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Commit") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Conn provides a mock function with given fields: +func (_m *DbTxMock) Conn() *pgx.Conn { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Conn") + } + + var r0 *pgx.Conn + if rf, ok := ret.Get(0).(func() *pgx.Conn); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pgx.Conn) + } + } + + return r0 +} + +// CopyFrom provides a mock function with given fields: ctx, tableName, columnNames, rowSrc +func (_m *DbTxMock) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) { + ret := _m.Called(ctx, tableName, columnNames, rowSrc) + + if len(ret) == 0 { + panic("no return value specified for CopyFrom") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) (int64, error)); ok { + return rf(ctx, tableName, columnNames, rowSrc) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) int64); ok { + r0 = rf(ctx, tableName, columnNames, rowSrc) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Identifier, []string, pgx.CopyFromSource) error); ok { + r1 = rf(ctx, tableName, columnNames, rowSrc) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Exec provides a mock function with given fields: ctx, sql, arguments +func (_m *DbTxMock) Exec(ctx context.Context, sql string, arguments ...interface{}) (pgconn.CommandTag, error) { + var _ca []interface{} + _ca = append(_ca, ctx, sql) + _ca = append(_ca, arguments...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for Exec") + } + + var r0 pgconn.CommandTag + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (pgconn.CommandTag, error)); ok { + return rf(ctx, sql, arguments...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgconn.CommandTag); ok { + r0 = rf(ctx, sql, arguments...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgconn.CommandTag) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, sql, arguments...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LargeObjects provides a mock function with given fields: +func (_m *DbTxMock) LargeObjects() pgx.LargeObjects { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for LargeObjects") + } + + var r0 pgx.LargeObjects + if rf, ok := ret.Get(0).(func() pgx.LargeObjects); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(pgx.LargeObjects) + } + + return r0 +} + +// Prepare provides a mock function with given fields: ctx, name, sql +func (_m *DbTxMock) Prepare(ctx context.Context, name string, sql string) (*pgconn.StatementDescription, error) { + ret := _m.Called(ctx, name, sql) + + if len(ret) == 0 { + panic("no return value specified for Prepare") + } + + var r0 *pgconn.StatementDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*pgconn.StatementDescription, error)); ok { + return rf(ctx, name, sql) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *pgconn.StatementDescription); ok { + r0 = rf(ctx, name, sql) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*pgconn.StatementDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, name, sql) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Query provides a mock function with given fields: ctx, sql, args +func (_m *DbTxMock) Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) { + var _ca []interface{} + _ca = append(_ca, ctx, sql) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Query") + } + + var r0 pgx.Rows + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) (pgx.Rows, error)); ok { + return rf(ctx, sql, args...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgx.Rows); ok { + r0 = rf(ctx, sql, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Rows) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...interface{}) error); ok { + r1 = rf(ctx, sql, args...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryFunc provides a mock function with given fields: ctx, sql, args, scans, f +func (_m *DbTxMock) QueryFunc(ctx context.Context, sql string, args []interface{}, scans []interface{}, f func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error) { + ret := _m.Called(ctx, sql, args, scans, f) + + if len(ret) == 0 { + panic("no return value specified for QueryFunc") + } + + var r0 pgconn.CommandTag + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) (pgconn.CommandTag, error)); ok { + return rf(ctx, sql, args, scans, f) + } + if rf, ok := ret.Get(0).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) pgconn.CommandTag); ok { + r0 = rf(ctx, sql, args, scans, f) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgconn.CommandTag) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, []interface{}, []interface{}, func(pgx.QueryFuncRow) error) error); ok { + r1 = rf(ctx, sql, args, scans, f) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryRow provides a mock function with given fields: ctx, sql, args +func (_m *DbTxMock) QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row { + var _ca []interface{} + _ca = append(_ca, ctx, sql) + _ca = append(_ca, args...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for QueryRow") + } + + var r0 pgx.Row + if rf, ok := ret.Get(0).(func(context.Context, string, ...interface{}) pgx.Row); ok { + r0 = rf(ctx, sql, args...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Row) + } + } + + return r0 +} + +// Rollback provides a mock function with given fields: ctx +func (_m *DbTxMock) Rollback(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for Rollback") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SendBatch provides a mock function with given fields: ctx, b +func (_m *DbTxMock) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults { + ret := _m.Called(ctx, b) + + if len(ret) == 0 { + panic("no return value specified for SendBatch") + } + + var r0 pgx.BatchResults + if rf, ok := ret.Get(0).(func(context.Context, *pgx.Batch) pgx.BatchResults); ok { + r0 = rf(ctx, b) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.BatchResults) + } + } + + return r0 +} + +// NewDbTxMock creates a new instance of DbTxMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewDbTxMock(t interface { + mock.TestingT + Cleanup(func()) +}) *DbTxMock { + mock := &DbTxMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_eth_tx_manager.go b/aggregator/mocks/mock_eth_tx_manager.go new file mode 100644 index 00000000..8db7a440 --- /dev/null +++ b/aggregator/mocks/mock_eth_tx_manager.go @@ -0,0 +1,258 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
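For the L1 settlement path the aggregator only needs a handful of EthTxManagerClient calls, and the mock below makes that dependency testable. A hypothetical sketch of stubbing a monitored transaction; the identifiers, values, and test name are invented for illustration and are not part of this patch:

package aggregator

import (
	"context"
	"testing"

	"github.com/0xPolygon/cdk/aggregator/mocks"
	ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// Illustrative sketch (not part of this patch): add a monitored tx, then poll its result.
func TestEthTxManagerSketch(t *testing.T) {
	ethTxManager := mocks.NewEthTxManagerClientMock(t)

	monitoredID := common.HexToHash("0x02")
	ethTxManager.On("Add", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
		Return(monitoredID, nil).Once()
	ethTxManager.On("Result", mock.Anything, monitoredID).
		Return(ethtxtypes.MonitoredTxResult{}, nil).Once()

	id, err := ethTxManager.Add(context.Background(), nil, nil, nil, 0, nil)
	require.NoError(t, err)

	_, err = ethTxManager.Result(context.Background(), id)
	require.NoError(t, err)
}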
+ +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + ethtxmanager "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + + kzg4844 "github.com/ethereum/go-ethereum/crypto/kzg4844" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" + + zkevm_ethtx_managertypes "github.com/0xPolygon/zkevm-ethtx-manager/types" +) + +// EthTxManagerClientMock is an autogenerated mock type for the EthTxManagerClient type +type EthTxManagerClientMock struct { + mock.Mock +} + +// Add provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar +func (_m *EthTxManagerClientMock) Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AddWithGas provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar, gas +func (_m *EthTxManagerClientMock) AddWithGas(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar, gas uint64) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar, gas) + + if len(ret) == 0 { + panic("no return value specified for AddWithGas") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar, gas) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EncodeBlobData provides a mock function with given fields: data +func (_m *EthTxManagerClientMock) EncodeBlobData(data []byte) (kzg4844.Blob, error) { + ret := _m.Called(data) + + if len(ret) == 0 { + panic("no return value specified for EncodeBlobData") + } + + var r0 kzg4844.Blob + var r1 error + if rf, ok := ret.Get(0).(func([]byte) (kzg4844.Blob, error)); ok { + return rf(data) + } + if rf, ok := ret.Get(0).(func([]byte) kzg4844.Blob); ok { + r0 = rf(data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(kzg4844.Blob) + } + } + + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 
= rf(data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MakeBlobSidecar provides a mock function with given fields: blobs +func (_m *EthTxManagerClientMock) MakeBlobSidecar(blobs []kzg4844.Blob) *types.BlobTxSidecar { + ret := _m.Called(blobs) + + if len(ret) == 0 { + panic("no return value specified for MakeBlobSidecar") + } + + var r0 *types.BlobTxSidecar + if rf, ok := ret.Get(0).(func([]kzg4844.Blob) *types.BlobTxSidecar); ok { + r0 = rf(blobs) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.BlobTxSidecar) + } + } + + return r0 +} + +// ProcessPendingMonitoredTxs provides a mock function with given fields: ctx, resultHandler +func (_m *EthTxManagerClientMock) ProcessPendingMonitoredTxs(ctx context.Context, resultHandler ethtxmanager.ResultHandler) { + _m.Called(ctx, resultHandler) +} + +// Remove provides a mock function with given fields: ctx, id +func (_m *EthTxManagerClientMock) Remove(ctx context.Context, id common.Hash) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Remove") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RemoveAll provides a mock function with given fields: ctx +func (_m *EthTxManagerClientMock) RemoveAll(ctx context.Context) error { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for RemoveAll") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Result provides a mock function with given fields: ctx, id +func (_m *EthTxManagerClientMock) Result(ctx context.Context, id common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for Result") + } + + var r0 zkevm_ethtx_managertypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) zkevm_ethtx_managertypes.MonitoredTxResult); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Get(0).(zkevm_ethtx_managertypes.MonitoredTxResult) + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ResultsByStatus provides a mock function with given fields: ctx, statuses +func (_m *EthTxManagerClientMock) ResultsByStatus(ctx context.Context, statuses []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, statuses) + + if len(ret) == 0 { + panic("no return value specified for ResultsByStatus") + } + + var r0 []zkevm_ethtx_managertypes.MonitoredTxResult + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { + return rf(ctx, statuses) + } + if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) []zkevm_ethtx_managertypes.MonitoredTxResult); ok { + r0 = rf(ctx, statuses) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]zkevm_ethtx_managertypes.MonitoredTxResult) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) error); ok { + r1 = 
rf(ctx, statuses) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Start provides a mock function with given fields: +func (_m *EthTxManagerClientMock) Start() { + _m.Called() +} + +// Stop provides a mock function with given fields: +func (_m *EthTxManagerClientMock) Stop() { + _m.Called() +} + +// NewEthTxManagerClientMock creates a new instance of EthTxManagerClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthTxManagerClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthTxManagerClientMock { + mock := &EthTxManagerClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_etherman.go b/aggregator/mocks/mock_etherman.go new file mode 100644 index 00000000..351acef3 --- /dev/null +++ b/aggregator/mocks/mock_etherman.go @@ -0,0 +1,210 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + big "math/big" + + common "github.com/ethereum/go-ethereum/common" + + ethmantypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" + + mock "github.com/stretchr/testify/mock" + + types "github.com/ethereum/go-ethereum/core/types" +) + +// EthermanMock is an autogenerated mock type for the Etherman type +type EthermanMock struct { + mock.Mock +} + +// BuildTrustedVerifyBatchesTxData provides a mock function with given fields: lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary +func (_m *EthermanMock) BuildTrustedVerifyBatchesTxData(lastVerifiedBatch uint64, newVerifiedBatch uint64, inputs *ethmantypes.FinalProofInputs, beneficiary common.Address) (*common.Address, []byte, error) { + ret := _m.Called(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + + if len(ret) == 0 { + panic("no return value specified for BuildTrustedVerifyBatchesTxData") + } + + var r0 *common.Address + var r1 []byte + var r2 error + if rf, ok := ret.Get(0).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) (*common.Address, []byte, error)); ok { + return rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } + if rf, ok := ret.Get(0).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) *common.Address); ok { + r0 = rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*common.Address) + } + } + + if rf, ok := ret.Get(1).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) []byte); ok { + r1 = rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]byte) + } + } + + if rf, ok := ret.Get(2).(func(uint64, uint64, *ethmantypes.FinalProofInputs, common.Address) error); ok { + r2 = rf(lastVerifiedBatch, newVerifiedBatch, inputs, beneficiary) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// GetBatchAccInputHash provides a mock function with given fields: ctx, batchNumber +func (_m *EthermanMock) GetBatchAccInputHash(ctx context.Context, batchNumber uint64) (common.Hash, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetBatchAccInputHash") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (common.Hash, error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) common.Hash); ok { + r0 = 
rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestBlockHeader provides a mock function with given fields: ctx +func (_m *EthermanMock) GetLatestBlockHeader(ctx context.Context) (*types.Header, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLatestBlockHeader") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*types.Header, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *types.Header); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLatestVerifiedBatchNum provides a mock function with given fields: +func (_m *EthermanMock) GetLatestVerifiedBatchNum() (uint64, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetLatestVerifiedBatchNum") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func() (uint64, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRollupId provides a mock function with given fields: +func (_m *EthermanMock) GetRollupId() uint32 { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GetRollupId") + } + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +// HeaderByNumber provides a mock function with given fields: ctx, number +func (_m *EthermanMock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + ret := _m.Called(ctx, number) + + if len(ret) == 0 { + panic("no return value specified for HeaderByNumber") + } + + var r0 *types.Header + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { + return rf(ctx, number) + } + if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { + r0 = rf(ctx, number) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Header) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { + r1 = rf(ctx, number) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEthermanMock creates a new instance of EthermanMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewEthermanMock(t interface { + mock.TestingT + Cleanup(func()) +}) *EthermanMock { + mock := &EthermanMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_prover.go b/aggregator/mocks/mock_prover.go new file mode 100644 index 00000000..72bd66dc --- /dev/null +++ b/aggregator/mocks/mock_prover.go @@ -0,0 +1,271 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
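The prover mock defined below is what lets tryAggregateProofs and tryBuildFinalProof run in unit tests without a real prover. A compact, illustrative sketch of the two calls involved in aggregating a pair of recursive proofs; the proof IDs and payloads are placeholders and the test is not part of this patch:

package aggregator

import (
	"context"
	"testing"

	"github.com/0xPolygon/cdk/aggregator/mocks"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// Illustrative sketch (not part of this patch) of the proof aggregation calls.
func TestProverAggregationSketch(t *testing.T) {
	proverMock := mocks.NewProverInterfaceMock(t)

	proofID := "proof-id"
	recursiveProof := "recursive-proof"

	// Aggregating two recursive proofs yields a proof id, which is then
	// waited on until the prover returns the aggregated recursive proof.
	proverMock.On("AggregatedProof", "proof-A", "proof-B").Return(&proofID, nil).Once()
	proverMock.On("WaitRecursiveProof", mock.Anything, proofID).
		Return(recursiveProof, common.Hash{}, nil).Once()

	id, err := proverMock.AggregatedProof("proof-A", "proof-B")
	require.NoError(t, err)

	proof, _, err := proverMock.WaitRecursiveProof(context.Background(), *id)
	require.NoError(t, err)
	require.Equal(t, recursiveProof, proof)
}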
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + prover "github.com/0xPolygon/cdk/aggregator/prover" +) + +// ProverInterfaceMock is an autogenerated mock type for the ProverInterface type +type ProverInterfaceMock struct { + mock.Mock +} + +// Addr provides a mock function with given fields: +func (_m *ProverInterfaceMock) Addr() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Addr") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// AggregatedProof provides a mock function with given fields: inputProof1, inputProof2 +func (_m *ProverInterfaceMock) AggregatedProof(inputProof1 string, inputProof2 string) (*string, error) { + ret := _m.Called(inputProof1, inputProof2) + + if len(ret) == 0 { + panic("no return value specified for AggregatedProof") + } + + var r0 *string + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (*string, error)); ok { + return rf(inputProof1, inputProof2) + } + if rf, ok := ret.Get(0).(func(string, string) *string); ok { + r0 = rf(inputProof1, inputProof2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*string) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(inputProof1, inputProof2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BatchProof provides a mock function with given fields: input +func (_m *ProverInterfaceMock) BatchProof(input *prover.StatelessInputProver) (*string, error) { + ret := _m.Called(input) + + if len(ret) == 0 { + panic("no return value specified for BatchProof") + } + + var r0 *string + var r1 error + if rf, ok := ret.Get(0).(func(*prover.StatelessInputProver) (*string, error)); ok { + return rf(input) + } + if rf, ok := ret.Get(0).(func(*prover.StatelessInputProver) *string); ok { + r0 = rf(input) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*string) + } + } + + if rf, ok := ret.Get(1).(func(*prover.StatelessInputProver) error); ok { + r1 = rf(input) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FinalProof provides a mock function with given fields: inputProof, aggregatorAddr +func (_m *ProverInterfaceMock) FinalProof(inputProof string, aggregatorAddr string) (*string, error) { + ret := _m.Called(inputProof, aggregatorAddr) + + if len(ret) == 0 { + panic("no return value specified for FinalProof") + } + + var r0 *string + var r1 error + if rf, ok := ret.Get(0).(func(string, string) (*string, error)); ok { + return rf(inputProof, aggregatorAddr) + } + if rf, ok := ret.Get(0).(func(string, string) *string); ok { + r0 = rf(inputProof, aggregatorAddr) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*string) + } + } + + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(inputProof, aggregatorAddr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ID provides a mock function with given fields: +func (_m *ProverInterfaceMock) ID() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ID") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// IsIdle provides a mock function with given fields: +func (_m *ProverInterfaceMock) IsIdle() (bool, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsIdle") + } + + var r0 bool 
+ var r1 error + if rf, ok := ret.Get(0).(func() (bool, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Name provides a mock function with given fields: +func (_m *ProverInterfaceMock) Name() string { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Name") + } + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// WaitFinalProof provides a mock function with given fields: ctx, proofID +func (_m *ProverInterfaceMock) WaitFinalProof(ctx context.Context, proofID string) (*prover.FinalProof, error) { + ret := _m.Called(ctx, proofID) + + if len(ret) == 0 { + panic("no return value specified for WaitFinalProof") + } + + var r0 *prover.FinalProof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*prover.FinalProof, error)); ok { + return rf(ctx, proofID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *prover.FinalProof); ok { + r0 = rf(ctx, proofID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*prover.FinalProof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, proofID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// WaitRecursiveProof provides a mock function with given fields: ctx, proofID +func (_m *ProverInterfaceMock) WaitRecursiveProof(ctx context.Context, proofID string) (string, common.Hash, error) { + ret := _m.Called(ctx, proofID) + + if len(ret) == 0 { + panic("no return value specified for WaitRecursiveProof") + } + + var r0 string + var r1 common.Hash + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, string) (string, common.Hash, error)); ok { + return rf(ctx, proofID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) string); ok { + r0 = rf(ctx, proofID) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) common.Hash); ok { + r1 = rf(ctx, proofID) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(common.Hash) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, string) error); ok { + r2 = rf(ctx, proofID) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewProverInterfaceMock creates a new instance of ProverInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewProverInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *ProverInterfaceMock { + mock := &ProverInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_state.go b/aggregator/mocks/mock_state.go new file mode 100644 index 00000000..8879dd05 --- /dev/null +++ b/aggregator/mocks/mock_state.go @@ -0,0 +1,406 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + pgx "github.com/jackc/pgx/v4" + mock "github.com/stretchr/testify/mock" + + state "github.com/0xPolygon/cdk/state" +) + +// StateInterfaceMock is an autogenerated mock type for the StateInterface type +type StateInterfaceMock struct { + mock.Mock +} + +// AddBatch provides a mock function with given fields: ctx, dbBatch, dbTx +func (_m *StateInterfaceMock) AddBatch(ctx context.Context, dbBatch *state.DBBatch, dbTx pgx.Tx) error { + ret := _m.Called(ctx, dbBatch, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddBatch") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.DBBatch, pgx.Tx) error); ok { + r0 = rf(ctx, dbBatch, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddGeneratedProof provides a mock function with given fields: ctx, proof, dbTx +func (_m *StateInterfaceMock) AddGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { + ret := _m.Called(ctx, proof, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddGeneratedProof") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + r0 = rf(ctx, proof, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddSequence provides a mock function with given fields: ctx, sequence, dbTx +func (_m *StateInterfaceMock) AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error { + ret := _m.Called(ctx, sequence, dbTx) + + if len(ret) == 0 { + panic("no return value specified for AddSequence") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, state.Sequence, pgx.Tx) error); ok { + r0 = rf(ctx, sequence, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// BeginStateTransaction provides a mock function with given fields: ctx +func (_m *StateInterfaceMock) BeginStateTransaction(ctx context.Context) (pgx.Tx, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for BeginStateTransaction") + } + + var r0 pgx.Tx + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (pgx.Tx, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) pgx.Tx); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(pgx.Tx) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckProofContainsCompleteSequences provides a mock function with given fields: ctx, proof, dbTx +func (_m *StateInterfaceMock) CheckProofContainsCompleteSequences(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) (bool, error) { + ret := _m.Called(ctx, proof, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CheckProofContainsCompleteSequences") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) (bool, error)); ok { + return rf(ctx, proof, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) bool); ok { + r0 = rf(ctx, proof, dbTx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + r1 = rf(ctx, proof, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckProofExistsForBatch provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) CheckProofExistsForBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (bool, 
error) { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CheckProofExistsForBatch") + } + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (bool, error)); ok { + return rf(ctx, batchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) bool); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, batchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CleanupGeneratedProofs provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) CleanupGeneratedProofs(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CleanupGeneratedProofs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// CleanupLockedProofs provides a mock function with given fields: ctx, duration, dbTx +func (_m *StateInterfaceMock) CleanupLockedProofs(ctx context.Context, duration string, dbTx pgx.Tx) (int64, error) { + ret := _m.Called(ctx, duration, dbTx) + + if len(ret) == 0 { + panic("no return value specified for CleanupLockedProofs") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, pgx.Tx) (int64, error)); ok { + return rf(ctx, duration, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, string, pgx.Tx) int64); ok { + r0 = rf(ctx, duration, dbTx) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, pgx.Tx) error); ok { + r1 = rf(ctx, duration, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeleteBatchesNewerThanBatchNumber provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) DeleteBatchesNewerThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteBatchesNewerThanBatchNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteBatchesOlderThanBatchNumber provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) DeleteBatchesOlderThanBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteBatchesOlderThanBatchNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteGeneratedProofs provides a mock function with given fields: ctx, batchNumber, batchNumberFinal, dbTx +func (_m *StateInterfaceMock) DeleteGeneratedProofs(ctx context.Context, batchNumber uint64, batchNumberFinal uint64, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, batchNumberFinal, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteGeneratedProofs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, 
uint64, uint64, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, batchNumberFinal, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DeleteUngeneratedProofs provides a mock function with given fields: ctx, dbTx +func (_m *StateInterfaceMock) DeleteUngeneratedProofs(ctx context.Context, dbTx pgx.Tx) error { + ret := _m.Called(ctx, dbTx) + + if len(ret) == 0 { + panic("no return value specified for DeleteUngeneratedProofs") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) error); ok { + r0 = rf(ctx, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// GetBatch provides a mock function with given fields: ctx, batchNumber, dbTx +func (_m *StateInterfaceMock) GetBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.DBBatch, error) { + ret := _m.Called(ctx, batchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetBatch") + } + + var r0 *state.DBBatch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.DBBatch, error)); ok { + return rf(ctx, batchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.DBBatch); ok { + r0 = rf(ctx, batchNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.DBBatch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, batchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProofReadyToVerify provides a mock function with given fields: ctx, lastVerfiedBatchNumber, dbTx +func (_m *StateInterfaceMock) GetProofReadyToVerify(ctx context.Context, lastVerfiedBatchNumber uint64, dbTx pgx.Tx) (*state.Proof, error) { + ret := _m.Called(ctx, lastVerfiedBatchNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetProofReadyToVerify") + } + + var r0 *state.Proof + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Proof, error)); ok { + return rf(ctx, lastVerfiedBatchNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Proof); ok { + r0 = rf(ctx, lastVerfiedBatchNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, lastVerfiedBatchNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetProofsToAggregate provides a mock function with given fields: ctx, dbTx +func (_m *StateInterfaceMock) GetProofsToAggregate(ctx context.Context, dbTx pgx.Tx) (*state.Proof, *state.Proof, error) { + ret := _m.Called(ctx, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetProofsToAggregate") + } + + var r0 *state.Proof + var r1 *state.Proof + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) (*state.Proof, *state.Proof, error)); ok { + return rf(ctx, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, pgx.Tx) *state.Proof); ok { + r0 = rf(ctx, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Proof) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, pgx.Tx) *state.Proof); ok { + r1 = rf(ctx, dbTx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*state.Proof) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, pgx.Tx) error); ok { + r2 = rf(ctx, dbTx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// UpdateGeneratedProof provides a mock function with given fields: ctx, proof, dbTx +func (_m 
*StateInterfaceMock) UpdateGeneratedProof(ctx context.Context, proof *state.Proof, dbTx pgx.Tx) error { + ret := _m.Called(ctx, proof, dbTx) + + if len(ret) == 0 { + panic("no return value specified for UpdateGeneratedProof") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *state.Proof, pgx.Tx) error); ok { + r0 = rf(ctx, proof, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewStateInterfaceMock creates a new instance of StateInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *StateInterfaceMock { + mock := &StateInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/mocks/mock_synchronizer.go b/aggregator/mocks/mock_synchronizer.go new file mode 100644 index 00000000..28811e8c --- /dev/null +++ b/aggregator/mocks/mock_synchronizer.go @@ -0,0 +1,321 @@ +// Code generated by mockery v2.39.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + synchronizer "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" +) + +// SynchronizerInterfaceMock is an autogenerated mock type for the Synchronizer type +type SynchronizerInterfaceMock struct { + mock.Mock +} + +// GetL1BlockByNumber provides a mock function with given fields: ctx, blockNumber +func (_m *SynchronizerInterfaceMock) GetL1BlockByNumber(ctx context.Context, blockNumber uint64) (*synchronizer.L1Block, error) { + ret := _m.Called(ctx, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for GetL1BlockByNumber") + } + + var r0 *synchronizer.L1Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.L1Block, error)); ok { + return rf(ctx, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.L1Block); ok { + r0 = rf(ctx, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.L1Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetL1InfoRootPerIndex provides a mock function with given fields: ctx, L1InfoTreeIndex +func (_m *SynchronizerInterfaceMock) GetL1InfoRootPerIndex(ctx context.Context, L1InfoTreeIndex uint32) (common.Hash, error) { + ret := _m.Called(ctx, L1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoRootPerIndex") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (common.Hash, error)); ok { + return rf(ctx, L1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) common.Hash); ok { + r0 = rf(ctx, L1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, L1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetL1InfoTreeLeaves provides a mock function with given fields: ctx, indexLeaves +func (_m *SynchronizerInterfaceMock) GetL1InfoTreeLeaves(ctx context.Context, indexLeaves []uint32) (map[uint32]synchronizer.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, indexLeaves) + + if len(ret) == 0 { + panic("no 
return value specified for GetL1InfoTreeLeaves") + } + + var r0 map[uint32]synchronizer.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []uint32) (map[uint32]synchronizer.L1InfoTreeLeaf, error)); ok { + return rf(ctx, indexLeaves) + } + if rf, ok := ret.Get(0).(func(context.Context, []uint32) map[uint32]synchronizer.L1InfoTreeLeaf); ok { + r0 = rf(ctx, indexLeaves) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[uint32]synchronizer.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []uint32) error); ok { + r1 = rf(ctx, indexLeaves) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLastL1Block provides a mock function with given fields: ctx +func (_m *SynchronizerInterfaceMock) GetLastL1Block(ctx context.Context) (*synchronizer.L1Block, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastL1Block") + } + + var r0 *synchronizer.L1Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*synchronizer.L1Block, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *synchronizer.L1Block); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.L1Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLastestVirtualBatchNumber provides a mock function with given fields: ctx +func (_m *SynchronizerInterfaceMock) GetLastestVirtualBatchNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastestVirtualBatchNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetLeafsByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot +func (_m *SynchronizerInterfaceMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash) ([]synchronizer.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, l1InfoRoot) + + if len(ret) == 0 { + panic("no return value specified for GetLeafsByL1InfoRoot") + } + + var r0 []synchronizer.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) ([]synchronizer.L1InfoTreeLeaf, error)); ok { + return rf(ctx, l1InfoRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []synchronizer.L1InfoTreeLeaf); ok { + r0 = rf(ctx, l1InfoRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]synchronizer.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, l1InfoRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetSequenceByBatchNumber provides a mock function with given fields: ctx, batchNumber +func (_m *SynchronizerInterfaceMock) GetSequenceByBatchNumber(ctx context.Context, batchNumber uint64) (*synchronizer.SequencedBatches, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetSequenceByBatchNumber") + } + + var r0 *synchronizer.SequencedBatches + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.SequencedBatches, 
error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.SequencedBatches); ok { + r0 = rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.SequencedBatches) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetVirtualBatchByBatchNumber provides a mock function with given fields: ctx, batchNumber +func (_m *SynchronizerInterfaceMock) GetVirtualBatchByBatchNumber(ctx context.Context, batchNumber uint64) (*synchronizer.VirtualBatch, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetVirtualBatchByBatchNumber") + } + + var r0 *synchronizer.VirtualBatch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.VirtualBatch, error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.VirtualBatch); ok { + r0 = rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.VirtualBatch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// IsSynced provides a mock function with given fields: +func (_m *SynchronizerInterfaceMock) IsSynced() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsSynced") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// SetCallbackOnReorgDone provides a mock function with given fields: callback +func (_m *SynchronizerInterfaceMock) SetCallbackOnReorgDone(callback func(synchronizer.ReorgExecutionResult)) { + _m.Called(callback) +} + +// SetCallbackOnRollbackBatches provides a mock function with given fields: callback +func (_m *SynchronizerInterfaceMock) SetCallbackOnRollbackBatches(callback func(synchronizer.RollbackBatchesData)) { + _m.Called(callback) +} + +// Stop provides a mock function with given fields: +func (_m *SynchronizerInterfaceMock) Stop() { + _m.Called() +} + +// Sync provides a mock function with given fields: returnOnSync +func (_m *SynchronizerInterfaceMock) Sync(returnOnSync bool) error { + ret := _m.Called(returnOnSync) + + if len(ret) == 0 { + panic("no return value specified for Sync") + } + + var r0 error + if rf, ok := ret.Get(0).(func(bool) error); ok { + r0 = rf(returnOnSync) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewSynchronizerInterfaceMock creates a new instance of SynchronizerInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
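The generated mocks follow the usual mockery/testify pattern: a consumer test stubs methods with On/Return, and the constructor's t.Cleanup hook asserts the recorded expectations. A minimal, hypothetical sketch of driving the synchronizer mock this way (the test name and return values are illustrative; only the NewSynchronizerInterfaceMock constructor and the testify calls visible in the generated code above are assumed):

package aggregator_test

import (
	"context"
	"testing"

	"github.com/0xPolygon/cdk/aggregator/mocks"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestSynchronizerMockSketch(t *testing.T) {
	// The constructor registers a cleanup hook, so unmet expectations
	// fail the test automatically at the end.
	syncMock := mocks.NewSynchronizerInterfaceMock(t)
	syncMock.On("IsSynced").Return(true)
	syncMock.On("GetLastestVirtualBatchNumber", mock.Anything).Return(uint64(42), nil)

	require.True(t, syncMock.IsSynced())

	lastBatch, err := syncMock.GetLastestVirtualBatchNumber(context.Background())
	require.NoError(t, err)
	require.Equal(t, uint64(42), lastBatch)
}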
+func NewSynchronizerInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *SynchronizerInterfaceMock { + mock := &SynchronizerInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/aggregator/profitabilitychecker.go b/aggregator/profitabilitychecker.go index f05799eb..dc91a21e 100644 --- a/aggregator/profitabilitychecker.go +++ b/aggregator/profitabilitychecker.go @@ -18,14 +18,14 @@ const ( // TxProfitabilityCheckerBase checks pol collateral with min reward type TxProfitabilityCheckerBase struct { - State stateInterface + State StateInterface IntervalAfterWhichBatchSentAnyway time.Duration MinReward *big.Int } // NewTxProfitabilityCheckerBase init base tx profitability checker func NewTxProfitabilityCheckerBase( - state stateInterface, interval time.Duration, minReward *big.Int, + state StateInterface, interval time.Duration, minReward *big.Int, ) *TxProfitabilityCheckerBase { return &TxProfitabilityCheckerBase{ State: state, @@ -50,12 +50,12 @@ func (pc *TxProfitabilityCheckerBase) IsProfitable(ctx context.Context, polColla // TxProfitabilityCheckerAcceptAll validate batch anyway and don't check anything type TxProfitabilityCheckerAcceptAll struct { - State stateInterface + State StateInterface IntervalAfterWhichBatchSentAnyway time.Duration } // NewTxProfitabilityCheckerAcceptAll init tx profitability checker that accept all txs -func NewTxProfitabilityCheckerAcceptAll(state stateInterface, interval time.Duration) *TxProfitabilityCheckerAcceptAll { +func NewTxProfitabilityCheckerAcceptAll(state StateInterface, interval time.Duration) *TxProfitabilityCheckerAcceptAll { return &TxProfitabilityCheckerAcceptAll{ State: state, IntervalAfterWhichBatchSentAnyway: interval, @@ -77,7 +77,7 @@ func (pc *TxProfitabilityCheckerAcceptAll) IsProfitable(ctx context.Context, pol } // TODO: now it's impossible to check, when batch got consolidated, bcs it's not saved -// func isConsolidatedBatchAppeared(ctx context.Context, state stateInterface, +// func isConsolidatedBatchAppeared(ctx context.Context, state StateInterface, // intervalAfterWhichBatchConsolidatedAnyway time.Duration) (bool, error) { // batch, err := state.GetLastVerifiedBatch(ctx, nil) // if err != nil { diff --git a/test/Makefile b/test/Makefile index b72c101f..a1b51bb1 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,5 +1,7 @@ .PHONY: generate-mocks -generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers generate-mocks-sync generate-mocks-l1infotreesync +generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector generate-mocks-sequencesender \ + generate-mocks-da generate-mocks-l1infotreesync generate-mocks-helpers \ + generate-mocks-sync generate-mocks-l1infotreesync generate-mocks-aggregator .PHONY: generate-mocks-bridgesync @@ -34,8 +36,6 @@ generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync, using mock rm -Rf ../l1infotreesync/mocks export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../l1infotreesync --output ../l1infotreesync/mocks --outpkg mocks_l1infotreesync ${COMMON_MOCKERY_PARAMS} export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync --outpkg=l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go - - .PHONY: generate-mocks-aggoracle generate-mocks-helpers: 
## Generates mocks for helpers , using mockery tool @@ -48,6 +48,16 @@ generate-mocks-sync: ## Generates mocks for sync, using mockery tool export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go +.PHONY: generate-mocks-aggregator +generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ProverInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=ProverInterfaceMock --filename=mock_prover.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StateInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StateInterfaceMock --filename=mock_state.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../aggregator/agglayer --output=../aggregator/mocks --outpkg=mocks --structname=AgglayerClientInterfaceMock --filename=mock_agglayer_client.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Synchronizer --srcpkg=github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer --output=../aggregator/mocks --outpkg=mocks --structname=SynchronizerInterfaceMock --filename=mock_synchronizer.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StreamClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StreamClientMock --filename=mock_streamclient.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManagerClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthTxManagerClientMock --filename=mock_eth_tx_manager.go + export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../aggregator/mocks --outpkg=mocks --structname=DbTxMock --filename=mock_dbtx.go .PHONY: test-e2e-fork9-validium test-e2e-fork9-validium: stop From fe655a6b6ee20675aae5cdc02ae75e697eceaf97 Mon Sep 17 00:00:00 2001 From: rbpol Date: Thu, 10 Oct 2024 18:04:55 +0100 Subject: [PATCH 2/4] feat: Add support for all the contracts on `test/helpers` so it's easy to build E2E tests (#115) --- aggoracle/e2e_test.go | 4 +- bridgesync/e2e_test.go | 44 +- claimsponsor/e2e_test.go | 3 +- .../datacommittee/datacommittee_test.go | 87 +--- l1infotreesync/e2e_test.go | 83 ++-- lastgersync/e2e_test.go | 4 +- reorgdetector/reorgdetector_test.go | 34 +- test/aggoraclehelpers/aggoracle_e2e.go | 199 +++++++++ test/helpers/aggoracle_e2e.go | 419 ------------------ test/helpers/simulated.go | 119 +++++ 10 files changed, 387 insertions(+), 609 deletions(-) create mode 100644 test/aggoraclehelpers/aggoracle_e2e.go delete mode 100644 test/helpers/aggoracle_e2e.go diff --git a/aggoracle/e2e_test.go b/aggoracle/e2e_test.go index 25a8a96d..b1506032 100644 --- a/aggoracle/e2e_test.go +++ b/aggoracle/e2e_test.go @@ -8,7 +8,7 @@ import ( gerContractL1 
"github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" "github.com/0xPolygon/cdk/aggoracle" - "github.com/0xPolygon/cdk/test/helpers" + "github.com/0xPolygon/cdk/test/aggoraclehelpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient/simulated" @@ -16,7 +16,7 @@ import ( ) func TestEVM(t *testing.T) { - env := helpers.SetupAggoracleWithEVMChain(t) + env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) runTest(t, env.GERL1Contract, env.AggOracleSender, env.L1Client, env.AuthL1) } diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go index a19afb8d..c0a22484 100644 --- a/bridgesync/e2e_test.go +++ b/bridgesync/e2e_test.go @@ -8,60 +8,28 @@ import ( "testing" "time" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" "github.com/0xPolygon/cdk/bridgesync" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/stretchr/testify/require" ) -func newSimulatedClient(t *testing.T, auth *bind.TransactOpts) ( - client *simulated.Backend, - bridgeAddr common.Address, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, -) { - t.Helper() - - var err error - balance, _ := big.NewInt(0).SetString("10000000000000000000000000", 10) - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeAddr, _, bridgeContract, err = polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(auth, client.Client()) - require.NoError(t, err) - client.Commit() - - return -} - func TestBridgeEventE2E(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") dbPathReorg := t.TempDir() - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - client, bridgeAddr, bridgeSc := newSimulatedClient(t, auth) + + client, setup := helpers.SimulatedBackend(t, nil, 0) rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg}) require.NoError(t, err) go rd.Start(ctx) //nolint:errcheck testClient := helpers.TestClient{ClientRenamed: client.Client()} - syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, bridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) + syncer, err := bridgesync.NewL1(ctx, dbPathSyncer, setup.EBZkevmBridgeAddr, 10, etherman.LatestBlock, rd, testClient, 0, time.Millisecond*10, 0, 0) require.NoError(t, err) go syncer.Start(ctx) @@ -71,15 +39,15 @@ func TestBridgeEventE2E(t *testing.T) { for i := 0; i < 100; i++ { bridge := bridgesync.Bridge{ - BlockNum: uint64(2 + i), + BlockNum: uint64(4 + i), Amount: big.NewInt(0), DepositCount: uint32(i), DestinationNetwork: 3, DestinationAddress: common.HexToAddress("f00"), Metadata: []byte{}, } - tx, err := bridgeSc.BridgeAsset( - auth, + tx, err := setup.EBZkevmBridgeContract.BridgeAsset( + setup.UserAuth, bridge.DestinationNetwork, bridge.DestinationAddress, bridge.Amount, diff --git 
a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go index 8a037a58..b4fce499 100644 --- a/claimsponsor/e2e_test.go +++ b/claimsponsor/e2e_test.go @@ -13,6 +13,7 @@ import ( "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/test/aggoraclehelpers" "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -22,7 +23,7 @@ import ( func TestE2EL1toEVML2(t *testing.T) { // start other needed components ctx := context.Background() - env := helpers.SetupAggoracleWithEVMChain(t) + env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) dbPathBridgeSyncL1 := path.Join(t.TempDir(), "file::memory:?cache=shared") testClient := helpers.TestClient{ClientRenamed: env.L1Client.Client()} bridgeSyncL1, err := bridgesync.NewL1(ctx, dbPathBridgeSyncL1, env.BridgeL1Addr, 10, etherman.LatestBlock, env.ReorgDetector, testClient, 0, time.Millisecond*10, 0, 0) diff --git a/dataavailability/datacommittee/datacommittee_test.go b/dataavailability/datacommittee/datacommittee_test.go index fcacef3c..7e2a8d3e 100644 --- a/dataavailability/datacommittee/datacommittee_test.go +++ b/dataavailability/datacommittee/datacommittee_test.go @@ -9,9 +9,9 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana/polygondatacommittee" "github.com/0xPolygon/cdk/log" erc1967proxy "github.com/0xPolygon/cdk/test/contracts/erc1967proxy" + "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/stretchr/testify/assert" @@ -20,7 +20,7 @@ import ( func TestUpdateDataCommitteeEvent(t *testing.T) { // Set up testing environment - dac, ethBackend, auth, da := newTestingEnv(t) + dac, ethBackend, da, auth := newSimulatedDacman(t) // Update the committee requiredAmountOfSignatures := big.NewInt(2) @@ -63,82 +63,39 @@ func init() { }) } -// This function prepare the blockchain, the wallet with funds and deploy the smc -func newTestingEnv(t *testing.T) ( - dac *Backend, - ethBackend *simulated.Backend, - auth *bind.TransactOpts, - da *polygondatacommittee.Polygondatacommittee, -) { - t.Helper() - privateKey, err := crypto.GenerateKey() - if err != nil { - log.Fatal(err) - } - auth, err = bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - if err != nil { - log.Fatal(err) - } - dac, ethBackend, da, err = newSimulatedDacman(t, auth) - if err != nil { - log.Fatal(err) - } - - return dac, ethBackend, auth, da -} - // NewSimulatedEtherman creates an etherman that uses a simulated blockchain. It's important to notice that the ChainID of the auth // must be 1337. 
The address that holds the auth will have an initial balance of 10 ETH -func newSimulatedDacman(t *testing.T, auth *bind.TransactOpts) ( - dacman *Backend, - ethBackend *simulated.Backend, - da *polygondatacommittee.Polygondatacommittee, - err error, +func newSimulatedDacman(t *testing.T) ( + *Backend, + *simulated.Backend, + *polygondatacommittee.Polygondatacommittee, + *bind.TransactOpts, ) { t.Helper() - if auth == nil { - // read only client - return &Backend{}, nil, nil, nil - } - // 10000000 ETH in wei - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client := simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + + ethBackend, setup := helpers.SimulatedBackend(t, nil, 0) // DAC Setup - addr, _, _, err := smcparis.DeployPolygondatacommittee(auth, client.Client()) - if err != nil { - return &Backend{}, nil, nil, err - } - client.Commit() - proxyAddr, err := deployDACProxy(auth, client.Client(), addr) - if err != nil { - return &Backend{}, nil, nil, err - } + addr, _, _, err := smcparis.DeployPolygondatacommittee(setup.UserAuth, ethBackend.Client()) + require.NoError(t, err) + ethBackend.Commit() - client.Commit() - da, err = polygondatacommittee.NewPolygondatacommittee(proxyAddr, client.Client()) - if err != nil { - return &Backend{}, nil, nil, err - } + proxyAddr, err := deployDACProxy(setup.UserAuth, ethBackend.Client(), addr) + require.NoError(t, err) + ethBackend.Commit() - _, err = da.SetupCommittee(auth, big.NewInt(0), []string{}, []byte{}) - if err != nil { - return &Backend{}, nil, nil, err - } - client.Commit() + da, err := polygondatacommittee.NewPolygondatacommittee(proxyAddr, ethBackend.Client()) + require.NoError(t, err) + + _, err = da.SetupCommittee(setup.UserAuth, big.NewInt(0), []string{}, []byte{}) + require.NoError(t, err) + ethBackend.Commit() c := &Backend{ dataCommitteeContract: da, } - return c, client, da, nil + return c, ethBackend, da, setup.UserAuth } func deployDACProxy(auth *bind.TransactOpts, client bind.ContractBackend, dacImpl common.Address) (common.Address, error) { diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index c522c73a..94596f23 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -2,7 +2,6 @@ package l1infotreesync_test import ( "context" - "errors" "fmt" "math/big" "path" @@ -16,6 +15,7 @@ import ( "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/test/contracts/verifybatchesmock" + "github.com/0xPolygon/cdk/test/helpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -25,61 +25,44 @@ import ( "github.com/stretchr/testify/require" ) -func newSimulatedClient(auth *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - verifyAddr common.Address, - gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, - verifyContract *verifybatchesmock.Verifybatchesmock, - err error, +func newSimulatedClient(t *testing.T) ( + *simulated.Backend, + *bind.TransactOpts, + common.Address, + common.Address, + *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, + *verifybatchesmock.Verifybatchesmock, ) { + t.Helper() + ctx := context.Background() - balance, _ := 
new(big.Int).SetString("10000000000000000000000000", 10) - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - } - blockGasLimit := uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) + client, setup := helpers.SimulatedBackend(t, nil, 0) - nonce, err := client.Client().PendingNonceAt(ctx, auth.From) - if err != nil { - return - } - precalculatedAddr := crypto.CreateAddress(auth.From, nonce+1) - verifyAddr, _, verifyContract, err = verifybatchesmock.DeployVerifybatchesmock(auth, client.Client(), precalculatedAddr) - if err != nil { - return - } + nonce, err := client.Client().PendingNonceAt(ctx, setup.UserAuth.From) + require.NoError(t, err) + + precalculatedAddr := crypto.CreateAddress(setup.UserAuth.From, nonce+1) + verifyAddr, _, verifyContract, err := verifybatchesmock.DeployVerifybatchesmock(setup.UserAuth, client.Client(), precalculatedAddr) + require.NoError(t, err) client.Commit() - gerAddr, _, gerContract, err = polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2(auth, client.Client(), verifyAddr, auth.From) - if err != nil { - return - } + gerAddr, _, gerContract, err := polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2(setup.UserAuth, client.Client(), verifyAddr, setup.UserAuth.From) + require.NoError(t, err) client.Commit() - if precalculatedAddr != gerAddr { - err = errors.New("error calculating addr") - } + require.Equal(t, precalculatedAddr, gerAddr) - return + return client, setup.UserAuth, gerAddr, verifyAddr, gerContract, verifyContract } func TestE2E(t *testing.T) { ctx := context.Background() dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) + rdm := l1infotreesync.NewReorgDetectorMock(t) rdm.On("Subscribe", mock.Anything).Return(&reorgdetector.Subscription{}, nil) rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) - require.NoError(t, err) + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) syncer, err := l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) @@ -165,15 +148,13 @@ func TestWithReorgs(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") dbPathReorg := t.TempDir() - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) - require.NoError(t, err) + + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 30)}) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 25, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) 
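Both TestWithReorgs above and TestStressAndReorgs below now obtain their chain from helpers.SimulatedBackend instead of building a simulated backend inline. A hypothetical standalone sketch of calling the helper directly, exercising the optional balances map (extraAddr and its balance are illustrative; the helper signature and the SimulatedBackendSetup fields come from test/helpers/simulated.go added later in this patch):

package helpers_test

import (
	"math/big"
	"testing"

	"github.com/0xPolygon/cdk/test/helpers"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/stretchr/testify/require"
)

func TestSimulatedBackendSketch(t *testing.T) {
	// extraAddr and its balance are illustrative: the helper merges them into
	// the genesis alloc next to the user, deployer and bridge-proxy accounts.
	extraAddr := common.HexToAddress("0x1234")
	balance, ok := new(big.Int).SetString("1000000000000000000", 10)
	require.True(t, ok)

	client, setup := helpers.SimulatedBackend(t, map[common.Address]types.Account{
		extraAddr: {Balance: balance},
	}, 0)

	// The bridge (implementation plus initialized proxy) is already deployed,
	// and UserAuth/DeployerAuth are funded transactors bound to chain ID 1337.
	gerManager, err := setup.EBZkevmBridgeProxyContract.GlobalExitRootManager(&bind.CallOpts{})
	require.NoError(t, err)
	require.NotEqual(t, common.Address{}, gerManager)
	require.NotEqual(t, setup.UserAuth.From, setup.DeployerAuth.From)

	client.Commit()
}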
@@ -285,15 +266,13 @@ func TestStressAndReorgs(t *testing.T) { ctx := context.Background() dbPathSyncer := path.Join(t.TempDir(), "file:TestStressAndReorgs:memory:?cache=shared") dbPathReorg := t.TempDir() - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337)) - require.NoError(t, err) - client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth) - require.NoError(t, err) + + client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) + rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}) require.NoError(t, err) require.NoError(t, rd.Start(ctx)) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 100, l1infotreesync.FlagAllowWrongContractsAddrs) require.NoError(t, err) diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go index 979d55a2..e4d5e407 100644 --- a/lastgersync/e2e_test.go +++ b/lastgersync/e2e_test.go @@ -9,7 +9,7 @@ import ( "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/lastgersync" - "github.com/0xPolygon/cdk/test/helpers" + "github.com/0xPolygon/cdk/test/aggoraclehelpers" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" @@ -17,7 +17,7 @@ import ( func TestE2E(t *testing.T) { ctx := context.Background() - env := helpers.SetupAggoracleWithEVMChain(t) + env := aggoraclehelpers.SetupAggoracleWithEVMChain(t) dbPathSyncer := t.TempDir() syncer, err := lastgersync.New( ctx, diff --git a/reorgdetector/reorgdetector_test.go b/reorgdetector/reorgdetector_test.go index 7efe0892..c99bb484 100644 --- a/reorgdetector/reorgdetector_test.go +++ b/reorgdetector/reorgdetector_test.go @@ -2,47 +2,21 @@ package reorgdetector import ( "context" - big "math/big" "testing" "time" cdktypes "github.com/0xPolygon/cdk/config/types" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/0xPolygon/cdk/test/helpers" "github.com/stretchr/testify/require" ) -func newSimulatedL1(t *testing.T, auth *bind.TransactOpts) *simulated.Backend { - t.Helper() - - balance, _ := new(big.Int).SetString("10000000000000000000000000", 10) - - blockGasLimit := uint64(999999999999999999) - client := simulated.NewBackend(map[common.Address]types.Account{ - auth.From: { - Balance: balance, - }, - }, simulated.WithBlockGasLimit(blockGasLimit)) - client.Commit() - - return client -} - func Test_ReorgDetector(t *testing.T) { const subID = "test" ctx := context.Background() // Simulated L1 - privateKeyL1, err := crypto.GenerateKey() - require.NoError(t, err) - authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(1337)) - require.NoError(t, err) - clientL1 := newSimulatedL1(t, authL1) - require.NoError(t, err) + clientL1, _ := helpers.SimulatedBackend(t, nil, 0) // Create test DB dir testDir := t.TempDir() @@ -92,6 +66,6 @@ func Test_ReorgDetector(t *testing.T) { headersList, ok := reorgDetector.trackedBlocks[subID] reorgDetector.trackedBlocksLock.Unlock() require.True(t, ok) - require.Equal(t, 1, headersList.len()) // Only block 2 left - require.Equal(t, remainingHeader.Hash(), 
headersList.get(2).Hash) + require.Equal(t, 1, headersList.len()) // Only block 3 left + require.Equal(t, remainingHeader.Hash(), headersList.get(4).Hash) } diff --git a/test/aggoraclehelpers/aggoracle_e2e.go b/test/aggoraclehelpers/aggoracle_e2e.go new file mode 100644 index 00000000..be362ccc --- /dev/null +++ b/test/aggoraclehelpers/aggoracle_e2e.go @@ -0,0 +1,199 @@ +package aggoraclehelpers + +import ( + "context" + "path" + "testing" + "time" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" + gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" + gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0" + "github.com/0xPolygon/cdk/aggoracle" + "github.com/0xPolygon/cdk/aggoracle/chaingersender" + "github.com/0xPolygon/cdk/etherman" + "github.com/0xPolygon/cdk/l1infotreesync" + "github.com/0xPolygon/cdk/log" + "github.com/0xPolygon/cdk/reorgdetector" + "github.com/0xPolygon/cdk/test/helpers" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient/simulated" + "github.com/stretchr/testify/require" +) + +const ( + NetworkIDL2 = uint32(1) + syncBlockChunkSize = 10 + retries = 3 + periodRetry = time.Millisecond * 100 +) + +type AggoracleWithEVMChainEnv struct { + L1Client *simulated.Backend + L2Client *simulated.Backend + L1InfoTreeSync *l1infotreesync.L1InfoTreeSync + GERL1Contract *gerContractL1.Globalexitrootnopush0 + GERL1Addr common.Address + GERL2Contract *gerContractEVMChain.Pessimisticglobalexitrootnopush0 + GERL2Addr common.Address + AuthL1 *bind.TransactOpts + AuthL2 *bind.TransactOpts + AggOracle *aggoracle.AggOracle + AggOracleSender aggoracle.ChainSender + ReorgDetector *reorgdetector.ReorgDetector + BridgeL1Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + BridgeL1Addr common.Address + BridgeL2Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + BridgeL2Addr common.Address + NetworkIDL2 uint32 + EthTxManMockL2 *helpers.EthTxManagerMock +} + +func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv { + t.Helper() + + ctx := context.Background() + l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) + sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t) + oracle, err := aggoracle.New( + log.GetDefaultLogger(), sender, + l1Client.Client(), syncer, + etherman.LatestBlock, time.Millisecond*20) //nolint:mnd + require.NoError(t, err) + go oracle.Start(ctx) + + return &AggoracleWithEVMChainEnv{ + L1Client: l1Client, + L2Client: l2Client, + L1InfoTreeSync: syncer, + GERL1Contract: gerL1Contract, + GERL1Addr: gerL1Addr, + GERL2Contract: gerL2Contract, + GERL2Addr: gerL2Addr, + AuthL1: authL1, + AuthL2: authL2, + AggOracle: oracle, + AggOracleSender: sender, + ReorgDetector: rd, + BridgeL1Contract: bridgeL1Contract, + BridgeL1Addr: bridgeL1Addr, + BridgeL2Contract: bridgeL2Contract, + BridgeL2Addr: bridgeL2Addr, + NetworkIDL2: NetworkIDL2, + EthTxManMockL2: ethTxManMockL2, + } +} + +func CommonSetup(t *testing.T) ( + *simulated.Backend, + *l1infotreesync.L1InfoTreeSync, + *gerContractL1.Globalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, + common.Address, + *bind.TransactOpts, + *reorgdetector.ReorgDetector, +) { + t.Helper() + + // Config and 
spin up + ctx := context.Background() + + // Simulated L1 + l1Client, authL1, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract := newSimulatedL1(t) + + // Reorg detector + dbPathReorgDetector := t.TempDir() + reorg, err := reorgdetector.New(l1Client.Client(), reorgdetector.Config{DBPath: dbPathReorgDetector}) + require.NoError(t, err) + + // Syncer + dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, + gerL1Addr, common.Address{}, + syncBlockChunkSize, etherman.LatestBlock, + reorg, l1Client.Client(), + time.Millisecond, 0, periodRetry, retries, l1infotreesync.FlagAllowWrongContractsAddrs) + require.NoError(t, err) + go syncer.Start(ctx) + + return l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, reorg +} + +func EVMSetup(t *testing.T) ( + aggoracle.ChainSender, + *simulated.Backend, + *gerContractEVMChain.Pessimisticglobalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, + common.Address, + *bind.TransactOpts, + *helpers.EthTxManagerMock, +) { + t.Helper() + + l2Client, authL2, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc := newSimulatedEVMAggSovereignChain(t) + ethTxManMock := helpers.NewEthTxManMock(t, l2Client, authL2) + sender, err := chaingersender.NewEVMChainGERSender(log.GetDefaultLogger(), + gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:mnd + require.NoError(t, err) + + return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock +} + +func newSimulatedL1(t *testing.T) ( + *simulated.Backend, + *bind.TransactOpts, + common.Address, + *gerContractL1.Globalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, +) { + t.Helper() + + client, setup := helpers.SimulatedBackend(t, nil, 0) + + precalculatedAddr := crypto.CreateAddress(setup.DeployerAuth.From, 2) //nolint:mnd + + gerAddr, _, gerContract, err := gerContractL1.DeployGlobalexitrootnopush0(setup.DeployerAuth, client.Client(), + setup.UserAuth.From, setup.EBZkevmBridgeProxyAddr) + require.NoError(t, err) + client.Commit() + + require.Equal(t, precalculatedAddr, gerAddr) + + return client, setup.UserAuth, gerAddr, gerContract, setup.EBZkevmBridgeProxyAddr, setup.EBZkevmBridgeProxyContract +} + +func newSimulatedEVMAggSovereignChain(t *testing.T) ( + *simulated.Backend, + *bind.TransactOpts, + common.Address, + *gerContractEVMChain.Pessimisticglobalexitrootnopush0, + common.Address, + *polygonzkevmbridgev2.Polygonzkevmbridgev2, +) { + t.Helper() + + client, setup := helpers.SimulatedBackend(t, nil, NetworkIDL2) + + precalculatedAddr := crypto.CreateAddress(setup.DeployerAuth.From, 2) //nolint:mnd + + gerAddr, _, gerContract, err := gerContractEVMChain.DeployPessimisticglobalexitrootnopush0( + setup.DeployerAuth, client.Client(), setup.UserAuth.From) + require.NoError(t, err) + client.Commit() + + globalExitRootSetterRole := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176") + _, err = gerContract.GrantRole(setup.DeployerAuth, globalExitRootSetterRole, setup.UserAuth.From) + require.NoError(t, err) + client.Commit() + + hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, globalExitRootSetterRole, setup.UserAuth.From) + require.True(t, hasRole) + require.Equal(t, precalculatedAddr, gerAddr) + + return client, setup.UserAuth, gerAddr, gerContract, setup.EBZkevmBridgeProxyAddr, setup.EBZkevmBridgeProxyContract +} diff --git 
a/test/helpers/aggoracle_e2e.go b/test/helpers/aggoracle_e2e.go deleted file mode 100644 index a19cfd42..00000000 --- a/test/helpers/aggoracle_e2e.go +++ /dev/null @@ -1,419 +0,0 @@ -package helpers - -import ( - "context" - "errors" - "fmt" - "math/big" - "path" - "testing" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" - gerContractL1 "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0" - gerContractEVMChain "github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/pessimisticglobalexitrootnopush0" - "github.com/0xPolygon/cdk/aggoracle" - "github.com/0xPolygon/cdk/aggoracle/chaingersender" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/reorgdetector" - "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/stretchr/testify/require" -) - -const ( - NetworkIDL2 = uint32(1) - chainID = 1337 - initialBalance = "10000000000000000000000000" - blockGasLimit = uint64(999999999999999999) - syncBlockChunkSize = 10 - retries = 3 - periodRetry = time.Millisecond * 100 -) - -type AggoracleWithEVMChainEnv struct { - L1Client *simulated.Backend - L2Client *simulated.Backend - L1InfoTreeSync *l1infotreesync.L1InfoTreeSync - GERL1Contract *gerContractL1.Globalexitrootnopush0 - GERL1Addr common.Address - GERL2Contract *gerContractEVMChain.Pessimisticglobalexitrootnopush0 - GERL2Addr common.Address - AuthL1 *bind.TransactOpts - AuthL2 *bind.TransactOpts - AggOracle *aggoracle.AggOracle - AggOracleSender aggoracle.ChainSender - ReorgDetector *reorgdetector.ReorgDetector - BridgeL1Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 - BridgeL1Addr common.Address - BridgeL2Contract *polygonzkevmbridgev2.Polygonzkevmbridgev2 - BridgeL2Addr common.Address - NetworkIDL2 uint32 - EthTxManMockL2 *EthTxManagerMock -} - -func SetupAggoracleWithEVMChain(t *testing.T) *AggoracleWithEVMChainEnv { - t.Helper() - - ctx := context.Background() - l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, rd := CommonSetup(t) - sender, l2Client, gerL2Contract, gerL2Addr, bridgeL2Contract, bridgeL2Addr, authL2, ethTxManMockL2 := EVMSetup(t) - oracle, err := aggoracle.New( - log.GetDefaultLogger(), sender, - l1Client.Client(), syncer, - etherman.LatestBlock, time.Millisecond*20) //nolint:mnd - require.NoError(t, err) - go oracle.Start(ctx) - - return &AggoracleWithEVMChainEnv{ - L1Client: l1Client, - L2Client: l2Client, - L1InfoTreeSync: syncer, - GERL1Contract: gerL1Contract, - GERL1Addr: gerL1Addr, - GERL2Contract: gerL2Contract, - GERL2Addr: gerL2Addr, - AuthL1: authL1, - AuthL2: authL2, - AggOracle: oracle, - AggOracleSender: sender, - ReorgDetector: rd, - BridgeL1Contract: bridgeL1Contract, - BridgeL1Addr: bridgeL1Addr, - BridgeL2Contract: bridgeL2Contract, - BridgeL2Addr: bridgeL2Addr, - NetworkIDL2: NetworkIDL2, - EthTxManMockL2: ethTxManMockL2, - } -} - -func CommonSetup(t *testing.T) ( - *simulated.Backend, - *l1infotreesync.L1InfoTreeSync, - *gerContractL1.Globalexitrootnopush0, - common.Address, - *polygonzkevmbridgev2.Polygonzkevmbridgev2, - common.Address, - *bind.TransactOpts, - *reorgdetector.ReorgDetector, -) { - t.Helper() - - // 
Config and spin up - ctx := context.Background() - // Simulated L1 - privateKeyL1, err := crypto.GenerateKey() - require.NoError(t, err) - authL1, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) - require.NoError(t, err) - l1Client, gerL1Addr, gerL1Contract, bridgeL1Addr, bridgeL1Contract, err := newSimulatedL1(authL1) - require.NoError(t, err) - // Reorg detector - dbPathReorgDetector := t.TempDir() - reorg, err := reorgdetector.New(l1Client.Client(), reorgdetector.Config{DBPath: dbPathReorgDetector}) - require.NoError(t, err) - // Syncer - dbPathSyncer := path.Join(t.TempDir(), "file::memory:?cache=shared") - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, - gerL1Addr, common.Address{}, - syncBlockChunkSize, etherman.LatestBlock, - reorg, l1Client.Client(), - time.Millisecond, 0, periodRetry, retries, l1infotreesync.FlagAllowWrongContractsAddrs) - require.NoError(t, err) - go syncer.Start(ctx) - - return l1Client, syncer, gerL1Contract, gerL1Addr, bridgeL1Contract, bridgeL1Addr, authL1, reorg -} - -func EVMSetup(t *testing.T) ( - aggoracle.ChainSender, - *simulated.Backend, - *gerContractEVMChain.Pessimisticglobalexitrootnopush0, - common.Address, - *polygonzkevmbridgev2.Polygonzkevmbridgev2, - common.Address, - *bind.TransactOpts, - *EthTxManagerMock, -) { - t.Helper() - - privateKeyL2, err := crypto.GenerateKey() - require.NoError(t, err) - authL2, err := bind.NewKeyedTransactorWithChainID(privateKeyL2, big.NewInt(chainID)) - require.NoError(t, err) - l2Client, gerL2Addr, gerL2Sc, bridgeL2Addr, bridgeL2Sc, err := newSimulatedEVMAggSovereignChain(authL2) - require.NoError(t, err) - ethTxManMock := NewEthTxManMock(t, l2Client, authL2) - sender, err := chaingersender.NewEVMChainGERSender(log.GetDefaultLogger(), - gerL2Addr, authL2.From, l2Client.Client(), ethTxManMock, 0, time.Millisecond*50) //nolint:mnd - require.NoError(t, err) - - return sender, l2Client, gerL2Sc, gerL2Addr, bridgeL2Sc, bridgeL2Addr, authL2, ethTxManMock -} - -func newSimulatedL1(auth *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - gerContract *gerContractL1.Globalexitrootnopush0, - bridgeAddr common.Address, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, - err error, -) { - ctx := context.Background() - - privateKeyL1, err := crypto.GenerateKey() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err) - } - - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err) - } - - balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:mnd - address := auth.From - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - authDeployer.From: { - Balance: balance, - }, - } - - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to deploy bridge implementation: %w", err) - } - client.Commit() - - nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err) - } - precalculatedAddr := 
crypto.CreateAddress(authDeployer.From, nonce+1) - bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - if bridgeABI == nil { - err = errors.New("GetABI returned nil") - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - - dataCallProxy, err := bridgeABI.Pack("initialize", - uint32(0), // networkIDMainnet - common.Address{}, // gasTokenAddressMainnet" - uint32(0), // gasTokenNetworkMainnet - precalculatedAddr, - common.Address{}, - []byte{}, // gasTokenMetadata - ) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to pack data for proxy initialization: %w", err) - } - - bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( - authDeployer, - client.Client(), - bridgeImplementationAddr, - authDeployer.From, - dataCallProxy, - ) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) - } - client.Commit() - - bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to create bridge contract instance: %w", err) - } - - checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{Pending: false}) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to get Global Exit Root Manager: %w", err) - } - if precalculatedAddr != checkGERAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error deploying bridge, unexpected GER addr. Expected %s. Actual %s", - precalculatedAddr.Hex(), checkGERAddr.Hex(), - ) - } - - gerAddr, _, gerContract, err = gerContractL1.DeployGlobalexitrootnopush0(authDeployer, client.Client(), - auth.From, bridgeAddr) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) - } - client.Commit() - - if precalculatedAddr != gerAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error calculating GER address. Expected %s. 
Actual %s", - precalculatedAddr.Hex(), gerAddr.Hex(), - ) - } - - return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil -} - -func newSimulatedEVMAggSovereignChain(auth *bind.TransactOpts) ( - client *simulated.Backend, - gerAddr common.Address, - gerContract *gerContractEVMChain.Pessimisticglobalexitrootnopush0, - bridgeAddr common.Address, - bridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2, - err error, -) { - ctx := context.Background() - - privateKeyL1, err := crypto.GenerateKey() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to generate private key: %w", err) - } - - authDeployer, err := bind.NewKeyedTransactorWithChainID(privateKeyL1, big.NewInt(chainID)) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to create transactor: %w", err) - } - - balance, _ := new(big.Int).SetString(initialBalance, 10) //nolint:mnd - address := auth.From - precalculatedBridgeAddr := crypto.CreateAddress(authDeployer.From, 1) - - genesisAlloc := map[common.Address]types.Account{ - address: { - Balance: balance, - }, - authDeployer.From: { - Balance: balance, - }, - precalculatedBridgeAddr: { - Balance: balance, - }, - } - - const blockGasLimit = uint64(999999999999999999) - client = simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) - - bridgeImplementationAddr, _, _, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(authDeployer, client.Client()) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to deploy bridge implementation: %w", err) - } - client.Commit() - - nonce, err := client.Client().PendingNonceAt(ctx, authDeployer.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get pending nonce: %w", err) - } - precalculatedAddr := crypto.CreateAddress(authDeployer.From, nonce+1) - - bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - if bridgeABI == nil { - err = errors.New("GetABI returned nil") - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to get bridge ABI: %w", err) - } - - dataCallProxy, err := bridgeABI.Pack("initialize", - NetworkIDL2, - common.Address{}, // gasTokenAddressMainnet" - uint32(0), // gasTokenNetworkMainnet - precalculatedAddr, - common.Address{}, - []byte{}, // gasTokenMetadata - ) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to pack data for proxy initialization: %w", err) - } - - bridgeAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( - authDeployer, - client.Client(), - bridgeImplementationAddr, - authDeployer.From, - dataCallProxy, - ) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to deploy transparent upgradable proxy: %w", err) - } - if bridgeAddr != precalculatedBridgeAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf( - "error calculating bridge addr. Expected: %s. 
Actual: %s", - precalculatedBridgeAddr, bridgeAddr, - ) - } - client.Commit() - - bridgeContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridgeAddr, client.Client()) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to create bridge contract instance: %w", err) - } - - checkGERAddr, err := bridgeContract.GlobalExitRootManager(&bind.CallOpts{}) - if err != nil { - return nil, common.Address{}, nil, - common.Address{}, nil, - fmt.Errorf("failed to get Global Exit Root Manager: %w", err) - } - if precalculatedAddr != checkGERAddr { - return nil, common.Address{}, nil, common.Address{}, nil, errors.New( - "error deploying bridge, unexpected GER Manager address", - ) - } - - gerAddr, _, gerContract, err = gerContractEVMChain.DeployPessimisticglobalexitrootnopush0( - authDeployer, client.Client(), auth.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to deploy GER contract: %w", err) - } - client.Commit() - - globalExitRootSetterRole := common.HexToHash("0x7b95520991dfda409891be0afa2635b63540f92ee996fda0bf695a166e5c5176") - _, err = gerContract.GrantRole(authDeployer, globalExitRootSetterRole, auth.From) - if err != nil { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to grant role to GER contract: %w", err) - } - client.Commit() - - hasRole, _ := gerContract.HasRole(&bind.CallOpts{Pending: false}, globalExitRootSetterRole, auth.From) - if !hasRole { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("failed to set role for GER contract") - } - - if precalculatedAddr != gerAddr { - return nil, common.Address{}, nil, common.Address{}, nil, fmt.Errorf("error calculating GER address") - } - - return client, gerAddr, gerContract, bridgeAddr, bridgeContract, nil -} diff --git a/test/helpers/simulated.go b/test/helpers/simulated.go index eb4cab20..d85baf92 100644 --- a/test/helpers/simulated.go +++ b/test/helpers/simulated.go @@ -1,8 +1,24 @@ package helpers import ( + "math/big" + "testing" + + "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry-paris/polygonzkevmbridgev2" + "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/require" +) + +const ( + defaultBlockGasLimit = uint64(999999999999999999) + defaultBalance = "10000000000000000000000000" + chainID = 1337 ) type ClientRenamed simulated.Client @@ -14,3 +30,106 @@ type TestClient struct { func (tc TestClient) Client() *rpc.Client { return nil } + +// SimulatedBackendSetup defines the setup for a simulated backend. +type SimulatedBackendSetup struct { + UserAuth *bind.TransactOpts + DeployerAuth *bind.TransactOpts + EBZkevmBridgeAddr common.Address + EBZkevmBridgeContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + EBZkevmBridgeProxyAddr common.Address + EBZkevmBridgeProxyContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 +} + +// SimulatedBackend creates a simulated backend with two accounts: user and deployer. 
+func SimulatedBackend( + t *testing.T, + balances map[common.Address]types.Account, + ebZkevmBridgeNetwork uint32, +) (*simulated.Backend, *SimulatedBackendSetup) { + t.Helper() + + // Define default balance + balance, ok := new(big.Int).SetString(defaultBalance, 10) //nolint:mnd + require.Truef(t, ok, "failed to set balance") + + // Create user + userPK, err := crypto.GenerateKey() + require.NoError(t, err) + userAuth, err := bind.NewKeyedTransactorWithChainID(userPK, big.NewInt(chainID)) + require.NoError(t, err) + + // Create deployer + deployerPK, err := crypto.GenerateKey() + require.NoError(t, err) + deployerAuth, err := bind.NewKeyedTransactorWithChainID(deployerPK, big.NewInt(chainID)) + require.NoError(t, err) + precalculatedBridgeAddr := crypto.CreateAddress(deployerAuth.From, 1) + + // Define balances map + if balances == nil { + balances = make(map[common.Address]types.Account) + } + balances[userAuth.From] = types.Account{Balance: balance} + balances[deployerAuth.From] = types.Account{Balance: balance} + balances[precalculatedBridgeAddr] = types.Account{Balance: balance} + + client := simulated.NewBackend(balances, simulated.WithBlockGasLimit(defaultBlockGasLimit)) + + // Mine the first block + client.Commit() + + // MUST BE DEPLOYED FIRST + // Deploy zkevm bridge contract + ebZkevmBridgeAddr, _, ebZkevmBridgeContract, err := polygonzkevmbridgev2.DeployPolygonzkevmbridgev2(deployerAuth, client.Client()) + require.NoError(t, err) + client.Commit() + + // Create proxy contract for the bridge + var ebZkevmBridgeProxyAddr common.Address + var ebZkevmBridgeProxyContract *polygonzkevmbridgev2.Polygonzkevmbridgev2 + { + precalculatedAddr := crypto.CreateAddress(deployerAuth.From, 2) //nolint:mnd + + bridgeABI, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() + require.NoError(t, err) + require.NotNil(t, bridgeABI) + + dataCallProxy, err := bridgeABI.Pack("initialize", + ebZkevmBridgeNetwork, + common.Address{}, // gasTokenAddressMainnet + uint32(0), // gasTokenNetworkMainnet + precalculatedAddr, + common.Address{}, + []byte{}, // gasTokenMetadata + ) + require.NoError(t, err) + + ebZkevmBridgeProxyAddr, _, _, err = transparentupgradableproxy.DeployTransparentupgradableproxy( + deployerAuth, + client.Client(), + ebZkevmBridgeAddr, + deployerAuth.From, + dataCallProxy, + ) + require.NoError(t, err) + require.Equal(t, precalculatedBridgeAddr, ebZkevmBridgeProxyAddr) + client.Commit() + + ebZkevmBridgeProxyContract, err = polygonzkevmbridgev2.NewPolygonzkevmbridgev2(ebZkevmBridgeProxyAddr, client.Client()) + require.NoError(t, err) + + checkGERAddr, err := ebZkevmBridgeProxyContract.GlobalExitRootManager(&bind.CallOpts{}) + require.NoError(t, err) + require.Equal(t, precalculatedAddr, checkGERAddr) + } + + return client, &SimulatedBackendSetup{ + UserAuth: userAuth, + DeployerAuth: deployerAuth, + EBZkevmBridgeAddr: ebZkevmBridgeAddr, + EBZkevmBridgeContract: ebZkevmBridgeContract, + EBZkevmBridgeProxyAddr: ebZkevmBridgeProxyAddr, + EBZkevmBridgeProxyContract: ebZkevmBridgeProxyContract, + } +} From 3abdb5ae6d98c651017dadc3b0e2ba6183106de7 Mon Sep 17 00:00:00 2001 From: rbpol Date: Mon, 14 Oct 2024 15:02:27 +0100 Subject: [PATCH 3/4] feat: L1 Info Tree sync testing (#124) --- l1infotreesync/e2e_test.go | 7 +- l1infotreesync/processor.go | 5 +- l1infotreesync/processor_test.go | 146 +++++++++++++++++++++++++++++++ sync/evmdriver.go | 3 + 4 files changed, 156 insertions(+), 5 deletions(-) diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 
94596f23..61e7ff28 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -56,7 +56,7 @@ func newSimulatedClient(t *testing.T) ( } func TestE2E(t *testing.T) { - ctx := context.Background() + ctx, cancelCtx := context.WithCancel(context.Background()) dbPath := path.Join(t.TempDir(), "file::memory:?cache=shared") rdm := l1infotreesync.NewReorgDetectorMock(t) @@ -96,6 +96,11 @@ func TestE2E(t *testing.T) { require.Equal(t, common.Hash(expectedRoot), actualRoot.Hash) } + // Restart syncer + cancelCtx() + ctx = context.Background() + go syncer.Start(ctx) + // Update 3 rollups (verify batches event) 3 times for rollupID := uint32(1); rollupID < 3; rollupID++ { for i := 0; i < 3; i++ { diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index c6a4ef1a..e7115a60 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@ -248,10 +248,7 @@ func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { return err } - if err := tx.Commit(); err != nil { - return err - } - return nil + return tx.Commit() } // ProcessBlock process the events of the block to build the rollup exit tree and the l1 info tree diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go index b31d2237..52a81ce8 100644 --- a/l1infotreesync/processor_test.go +++ b/l1infotreesync/processor_test.go @@ -5,6 +5,7 @@ import ( "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/sync" + "github.com/0xPolygon/cdk/tree/types" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" "golang.org/x/net/context" @@ -121,3 +122,148 @@ func TestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound(t *testing.T) { _, err = sut.GetLatestInfoUntilBlock(ctx, 1) require.Equal(t, db.ErrNotFound, err) } + +func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { + t.Parallel() + + testTable := []struct { + name string + getProcessor func(t *testing.T) *processor + idx uint32 + expectedRoot types.Root + expectedErr error + }{ + { + name: "empty tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_GetL1InfoTreeMerkleProof_1?mode=memory&cache=shared") + require.NoError(t, err) + + return p + }, + idx: 0, + expectedErr: db.ErrNotFound, + }, + { + name: "single leaf tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_GetL1InfoTreeMerkleProof_2?mode=memory&cache=shared") + require.NoError(t, err) + + info := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + err = p.ProcessBlock(context.Background(), sync.Block{ + Num: 1, + Events: []interface{}{ + Event{UpdateL1InfoTree: info}, + }, + }) + require.NoError(t, err) + + return p + }, + idx: 0, + expectedRoot: types.Root{ + Hash: common.HexToHash("beef"), + Index: 0, + BlockNum: 1, + BlockPosition: 0, + }, + }, + } + + for _, tt := range testTable { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + p := tt.getProcessor(t) + proof, root, err := p.GetL1InfoTreeMerkleProof(context.Background(), tt.idx) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + require.NotEmpty(t, proof) + require.NotEmpty(t, root.Hash) + require.Equal(t, tt.expectedRoot.Index, root.Index) + require.Equal(t, tt.expectedRoot.BlockNum, root.BlockNum) + require.Equal(t, 
tt.expectedRoot.BlockPosition, root.BlockPosition) + } + }) + } +} + +func Test_processor_Reorg(t *testing.T) { + t.Parallel() + + testTable := []struct { + name string + getProcessor func(t *testing.T) *processor + reorgBlock uint64 + expectedErr error + }{ + { + name: "empty tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_Reorg_1?mode=memory&cache=shared") + require.NoError(t, err) + return p + }, + reorgBlock: 0, + expectedErr: nil, + }, + { + name: "single leaf tree", + getProcessor: func(t *testing.T) *processor { + t.Helper() + + p, err := newProcessor("file:Test_processor_Reorg_2?mode=memory&cache=shared") + require.NoError(t, err) + + info := &UpdateL1InfoTree{ + MainnetExitRoot: common.HexToHash("beef"), + RollupExitRoot: common.HexToHash("5ca1e"), + ParentHash: common.HexToHash("1010101"), + Timestamp: 420, + } + err = p.ProcessBlock(context.Background(), sync.Block{ + Num: 1, + Events: []interface{}{ + Event{UpdateL1InfoTree: info}, + }, + }) + require.NoError(t, err) + + return p + }, + reorgBlock: 1, + }, + } + + for _, tt := range testTable { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + p := tt.getProcessor(t) + err := p.Reorg(context.Background(), tt.reorgBlock) + if tt.expectedErr != nil { + require.Equal(t, tt.expectedErr, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/sync/evmdriver.go b/sync/evmdriver.go index 7865f645..52eaaaae 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -92,6 +92,9 @@ reset: for { select { + case <-ctx.Done(): + d.log.Info("sync stopped due to context done") + return case b := <-downloadCh: d.log.Debug("handleNewBlock", " blockNum: ", b.Num, " blockHash: ", b.Hash) d.handleNewBlock(ctx, b) From 8e2015f7acf3a2279f2a6e5cc4df536723fdf6a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toni=20Ram=C3=ADrez?= <58293609+ToniRamirezM@users.noreply.github.com> Date: Mon, 14 Oct 2024 16:23:30 +0200 Subject: [PATCH 4/4] feat: warning on agglayer rate limit (#122) * feat: retry on agglayer rate limit exceeded --- aggregator/agglayer/agglayer_client.go | 7 +++++++ aggregator/aggregator.go | 7 +++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/aggregator/agglayer/agglayer_client.go b/aggregator/agglayer/agglayer_client.go index dbe48fb2..a5222571 100644 --- a/aggregator/agglayer/agglayer_client.go +++ b/aggregator/agglayer/agglayer_client.go @@ -13,6 +13,10 @@ import ( "github.com/ethereum/go-ethereum/common" ) +const errCodeAgglayerRateLimitExceeded int = -10007 + +var ErrAgglayerRateLimitExceeded = fmt.Errorf("agglayer rate limit exceeded") + // AgglayerClientInterface is the interface that defines the methods that the AggLayerClient will implement type AgglayerClientInterface interface { SendTx(signedTx SignedTx) (common.Hash, error) @@ -39,6 +43,9 @@ func (c *AggLayerClient) SendTx(signedTx SignedTx) (common.Hash, error) { } if response.Error != nil { + if response.Error.Code == errCodeAgglayerRateLimitExceeded { + return common.Hash{}, ErrAgglayerRateLimitExceeded + } return common.Hash{}, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) } diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 4d887136..2003f0e2 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -945,9 +945,12 @@ func (a *Aggregator) settleWithAggLayer( a.logger.Debug("final proof signedTx: ", signedTx.Tx.ZKP.Proof.Hex()) txHash, err := a.aggLayerClient.SendTx(*signedTx) if err != nil { - 
a.logger.Errorf("failed to send tx to the agglayer: %v", err)
+		if errors.Is(err, agglayer.ErrAgglayerRateLimitExceeded) {
+			a.logger.Errorf("%s. The VerifyProofInterval config param should match the agglayer's configured rate limit.", err)
+		} else {
+			a.logger.Errorf("failed to send tx to the agglayer: %v", err)
+		}
 		a.handleFailureToAddVerifyBatchToBeMonitored(ctx, proof)
-
 		return false
 	}
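
For reference, a minimal sketch of a test that consumes the SimulatedBackend helper added to test/helpers/simulated.go earlier in this series. The package name (helpers_test), the test name, and the specific assertions are illustrative assumptions only, not part of the patches; they simply exercise the signature and the SimulatedBackendSetup fields the helper returns.

package helpers_test

import (
	"testing"

	"github.com/0xPolygon/cdk/test/helpers"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/require"
)

// TestSimulatedBackendUsage is a hypothetical consumer of the helper,
// shown here only to illustrate the intended call pattern.
func TestSimulatedBackendUsage(t *testing.T) {
	// A nil balances map is allowed: the helper funds the user, the deployer
	// and the precalculated bridge proxy address with the default balance.
	client, setup := helpers.SimulatedBackend(t, nil, 0)

	// The proxy-backed bridge binding is already initialized, so read-only
	// calls such as GlobalExitRootManager work immediately.
	gerManager, err := setup.EBZkevmBridgeProxyContract.GlobalExitRootManager(&bind.CallOpts{})
	require.NoError(t, err)
	require.NotEqual(t, common.Address{}, gerManager)

	// Transactions signed with setup.UserAuth or setup.DeployerAuth are only
	// mined once Commit() is called on the simulated backend.
	client.Commit()
}

Passing a non-nil balances map instead of nil would let a test pre-fund additional accounts before the backend is created, since the helper merges the provided entries with its own.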