decouple sync processors from EVM
arnaubennassar committed Jul 25, 2024
1 parent 52b47c9 commit 78ad2dc
Showing 14 changed files with 180 additions and 137 deletions.
2 changes: 1 addition & 1 deletion aggoracle/e2e_test.go
@@ -59,7 +59,7 @@ func commonSetup(t *testing.T) (
require.NoError(t, err)
// Syncer
dbPathSyncer := t.TempDir()
syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, 10, etherman.LatestBlock, reorg, l1Client.Client(), 32, time.Millisecond)
syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, 10, etherman.LatestBlock, reorg, l1Client.Client(), 32, time.Millisecond, 0)
require.NoError(t, err)
go syncer.Start(ctx)

26 changes: 19 additions & 7 deletions common/common.go
@@ -2,15 +2,27 @@ package common

import "encoding/binary"

// BlockNum2Bytes converts a block number to a byte slice
func BlockNum2Bytes(blockNum uint64) []byte {
key := make([]byte, 8)
binary.LittleEndian.PutUint64(key, blockNum)
// Uint64To2Bytes converts a uint64 to an 8-byte little-endian slice
func Uint64To2Bytes(num uint64) []byte {
bytes := make([]byte, 8)
binary.LittleEndian.PutUint64(bytes, num)

return bytes
}

// BytesToUint64 converts a little-endian byte slice to a uint64
func BytesToUint64(bytes []byte) uint64 {
return binary.LittleEndian.Uint64(bytes)
}

// Uint32ToBytes converts a uint32 to a 4-byte little-endian slice
func Uint32ToBytes(num uint32) []byte {
key := make([]byte, 4)
binary.LittleEndian.PutUint32(key, num)
return key
}

// Bytes2BlockNum converts a byte slice to a block number
func Bytes2BlockNum(key []byte) uint64 {
return binary.LittleEndian.Uint64(key)
// BytesToUint32 converts a little-endian byte slice to a uint32
func BytesToUint32(bytes []byte) uint32 {
return binary.LittleEndian.Uint32(bytes)
}
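
For reference, a minimal round-trip sketch of the renamed helpers above. The helper names and the little-endian behaviour are taken from this hunk; the import alias and the example values are illustrative only.

package main

import (
	"fmt"

	cdkcommon "github.com/0xPolygon/cdk/common"
)

func main() {
	// uint64 <-> 8-byte little-endian slice
	blockNum := uint64(1_234_567)
	b := cdkcommon.Uint64To2Bytes(blockNum)
	fmt.Println(cdkcommon.BytesToUint64(b) == blockNum) // true

	// uint32 <-> 4-byte little-endian slice (used for leaf indexes in the processor)
	index := uint32(42)
	ib := cdkcommon.Uint32ToBytes(index)
	fmt.Println(cdkcommon.BytesToUint32(ib) == index) // true
}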
9 changes: 3 additions & 6 deletions l1infotreesync/downloader.go
@@ -23,11 +23,6 @@ type EthClienter interface {
bind.ContractBackend
}

type L1InfoTreeUpdate struct {
MainnetExitRoot common.Hash
RollupExitRoot common.Hash
}

func buildAppender(client EthClienter, globalExitRoot common.Address) (sync.LogAppenderMap, error) {
contract, err := polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2(globalExitRoot, client)
if err != nil {
@@ -42,9 +37,11 @@ func buildAppender(client EthClienter, globalExitRoot common.Address) (sync.LogA
l, err,
)
}
b.Events = append(b.Events, L1InfoTreeUpdate{
b.Events = append(b.Events, Event{
MainnetExitRoot: l1InfoTreeUpdate.MainnetExitRoot,
RollupExitRoot: l1InfoTreeUpdate.RollupExitRoot,
ParentHash: b.ParentHash,
Timestamp: b.Timestamp,
})
return nil
}
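
The appender now copies ParentHash and Timestamp into each Event because, as the processor changes further down show, ProcessBlock receives a generic sync.Block rather than a sync.EVMBlock. The sync package is touched by this commit but not expanded on this page; the following is a rough sketch of the assumed shapes (type and field names are illustrative, not confirmed by the visible hunks).

// Sketch only: assumed types in github.com/0xPolygon/cdk/sync.
package sync

import "github.com/ethereum/go-ethereum/common"

// EVM-specific block produced by the downloader; it still carries header data.
type EVMBlockHeader struct {
	Num        uint64
	Hash       common.Hash
	ParentHash common.Hash
	Timestamp  uint64
}

type EVMBlock struct {
	EVMBlockHeader
	Events []interface{}
}

// Chain-agnostic block handed to processors: just a number plus opaque events,
// which is why ParentHash and Timestamp now travel inside each package-level Event.
type Block struct {
	Num    uint64
	Events []interface{}
}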
2 changes: 1 addition & 1 deletion l1infotreesync/e2e_test.go
@@ -54,7 +54,7 @@ func TestE2E(t *testing.T) {
rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
client, gerAddr, gerSc, err := newSimulatedClient(auth)
require.NoError(t, err)
syncer, err := New(ctx, dbPath, gerAddr, 10, etherman.LatestBlock, rdm, client.Client(), 32, time.Millisecond)
syncer, err := New(ctx, dbPath, gerAddr, 10, etherman.LatestBlock, rdm, client.Client(), 32, time.Millisecond, 0)
require.NoError(t, err)
go syncer.Start(ctx)

14 changes: 14 additions & 0 deletions l1infotreesync/l1infotreesync.go
@@ -34,11 +34,25 @@ func New(
l1Client EthClienter,
treeHeight uint8,
waitForNewBlocksPeriod time.Duration,
initialBlock uint64,
) (*L1InfoTreeSync, error) {
processor, err := newProcessor(ctx, dbPath, treeHeight)
if err != nil {
return nil, err
}
// TODO: get the initialBlock from L1 to simplify config
lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx)
if err != nil {
return nil, err
}
if lastProcessedBlock < initialBlock {
err = processor.ProcessBlock(sync.Block{
Num: initialBlock,
})
if err != nil {
return nil, err
}
}

appender, err := buildAppender(l1Client, globalExitRoot)
if err != nil {
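
A hypothetical caller wiring up the syncer with the new trailing initialBlock argument. The argument order mirrors the updated test calls above; the names of the earlier parameters, the reorg-detector type and all concrete values are assumptions, since they are not visible in this hunk.

package main

import (
	"context"
	"time"

	"github.com/0xPolygon/cdk/etherman"
	"github.com/0xPolygon/cdk/l1infotreesync"
	"github.com/0xPolygon/cdk/sync"
	"github.com/ethereum/go-ethereum/common"
)

func buildSyncer(ctx context.Context, dbPath string, l1Client l1infotreesync.EthClienter, rd sync.ReorgDetector) error {
	gerAddr := common.HexToAddress("0x...") // GlobalExitRootV2 address, placeholder
	initialBlock := uint64(19_000_000)      // first L1 block of interest, placeholder
	syncer, err := l1infotreesync.New(
		ctx,
		dbPath,
		gerAddr,
		10,                   // sync block chunk size (assumed parameter name)
		etherman.LatestBlock, // block finality
		rd,
		l1Client,
		32,               // treeHeight
		time.Millisecond, // waitForNewBlocksPeriod
		initialBlock,     // new: processor is seeded up to this block before syncing starts
	)
	if err != nil {
		return err
	}
	go syncer.Start(ctx)
	return nil
}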
70 changes: 34 additions & 36 deletions l1infotreesync/processor.go
@@ -6,9 +6,10 @@ import (
"encoding/json"
"errors"

"github.com/0xPolygon/cdk/common"
"github.com/0xPolygon/cdk/l1infotree"
"github.com/0xPolygon/cdk/sync"
"github.com/ethereum/go-ethereum/common"
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/mdbx"
"golang.org/x/crypto/sha3"
@@ -33,22 +34,29 @@ type processor struct {
tree *l1infotree.L1InfoTree
}

type Event struct {
MainnetExitRoot ethCommon.Hash
RollupExitRoot ethCommon.Hash
ParentHash ethCommon.Hash
Timestamp uint64
}

type L1InfoTreeLeaf struct {
L1InfoTreeRoot common.Hash
L1InfoTreeRoot ethCommon.Hash
L1InfoTreeIndex uint32
PreviousBlockHash common.Hash
PreviousBlockHash ethCommon.Hash
BlockNumber uint64
Timestamp uint64
MainnetExitRoot common.Hash
RollupExitRoot common.Hash
GlobalExitRoot common.Hash
MainnetExitRoot ethCommon.Hash
RollupExitRoot ethCommon.Hash
GlobalExitRoot ethCommon.Hash
}

type storeLeaf struct {
MainnetExitRoot common.Hash
RollupExitRoot common.Hash
ParentHash common.Hash
InfoRoot common.Hash
MainnetExitRoot ethCommon.Hash
RollupExitRoot ethCommon.Hash
ParentHash ethCommon.Hash
InfoRoot ethCommon.Hash
Index uint32
Timestamp uint64
BlockNumber uint64
Expand All @@ -61,7 +69,7 @@ type blockWithLeafs struct {
LastIndex uint32
}

func (l *storeLeaf) GlobalExitRoot() common.Hash {
func (l *storeLeaf) GlobalExitRoot() ethCommon.Hash {
var gerBytes [32]byte
hasher := sha3.NewLegacyKeccak256()
hasher.Write(l.MainnetExitRoot[:])
@@ -122,19 +130,19 @@ func (p *processor) getAllLeavesHashed(ctx context.Context) ([][32]byte, error)
return p.getHasedLeaves(tx, index)
}

func (p *processor) ComputeMerkleProofByIndex(ctx context.Context, index uint32) ([][32]byte, common.Hash, error) {
func (p *processor) ComputeMerkleProofByIndex(ctx context.Context, index uint32) ([][32]byte, ethCommon.Hash, error) {
// TODO: refactor the tree to store the nodes so it's not necessary to load all the leaves and compute the tree
// every time this function is called. Since it's not a sparse MT, an alternative could be to store the proofs
// as part of the info
tx, err := p.db.BeginRo(ctx)
if err != nil {
return nil, common.Hash{}, err
return nil, ethCommon.Hash{}, err
}
defer tx.Rollback()

leaves, err := p.getHasedLeaves(tx, index)
if err != nil {
return nil, common.Hash{}, err
return nil, ethCommon.Hash{}, err
}
return p.tree.ComputeMerkleProof(index, leaves)
}
@@ -152,15 +160,15 @@ func (p *processor) getHasedLeaves(tx kv.Tx, untilIndex uint32) ([][32]byte, err
return leaves, nil
}

func (p *processor) ComputeMerkleProofByRoot(ctx context.Context, root common.Hash) ([][32]byte, common.Hash, error) {
func (p *processor) ComputeMerkleProofByRoot(ctx context.Context, root ethCommon.Hash) ([][32]byte, ethCommon.Hash, error) {
info, err := p.GetInfoByRoot(ctx, root)
if err != nil {
return nil, common.Hash{}, err
return nil, ethCommon.Hash{}, err
}
return p.ComputeMerkleProofByIndex(ctx, info.L1InfoTreeIndex)
}

func (p *processor) GetInfoByRoot(ctx context.Context, root common.Hash) (*L1InfoTreeLeaf, error) {
func (p *processor) GetInfoByRoot(ctx context.Context, root ethCommon.Hash) (*L1InfoTreeLeaf, error) {
tx, err := p.db.BeginRo(ctx)
if err != nil {
return nil, err
@@ -205,7 +213,7 @@ func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64
if err := json.Unmarshal(v, &blk); err != nil {
return nil, err
}
hash, err := tx.GetOne(indexTable, uint32ToBytes(blk.LastIndex-1))
hash, err := tx.GetOne(indexTable, common.Uint32ToBytes(blk.LastIndex-1))
if err != nil {
return nil, err
}
@@ -225,7 +233,7 @@ func (p *processor) GetInfoByIndex(ctx context.Context, index uint32) (*L1InfoTr
}

func (p *processor) getInfoByIndexWithTx(tx kv.Tx, index uint32) (*L1InfoTreeLeaf, error) {
hash, err := tx.GetOne(indexTable, uint32ToBytes(index))
hash, err := tx.GetOne(indexTable, common.Uint32ToBytes(index))
if err != nil {
return nil, err
}
@@ -331,7 +339,7 @@ func (p *processor) Reorg(firstReorgedBlock uint64) error {
func (p *processor) deleteLeaf(tx kv.RwTx, index uint32) error {
// TODO: do we need to do something with p.tree here?
// Get leaf info to delete all relations
hash, err := tx.GetOne(indexTable, uint32ToBytes(index))
hash, err := tx.GetOne(indexTable, common.Uint32ToBytes(index))
if err != nil {
return err
}
@@ -354,7 +362,7 @@ func (p *processor) deleteLeaf(tx kv.RwTx, index uint32) error {
if err := tx.Delete(rootTable, info.InfoRoot[:]); err != nil {
return err
}
if err := tx.Delete(indexTable, uint32ToBytes(index)); err != nil {
if err := tx.Delete(indexTable, common.Uint32ToBytes(index)); err != nil {
return err
}
if err := tx.Delete(infoTable, hash); err != nil {
@@ -365,7 +373,7 @@ func (p *processor) deleteLeaf(tx kv.RwTx, index uint32) error {

// ProcessBlock processes the leaves of the L1 info tree found in a block
// this function can be called without leaves with the intention to track the last processed block
func (p *processor) ProcessBlock(b sync.EVMBlock) error {
func (p *processor) ProcessBlock(b sync.Block) error {
tx, err := p.db.BeginRw(context.Background())
if err != nil {
return err
@@ -382,13 +390,13 @@ func (p *processor) ProcessBlock(b sync.EVMBlock) error {
initialIndex = lastIndex + 1
}
for i, e := range b.Events {
event := e.(L1InfoTreeUpdate)
event := e.(Event)
leafToStore := storeLeaf{
Index: initialIndex + uint32(i),
MainnetExitRoot: event.MainnetExitRoot,
RollupExitRoot: event.RollupExitRoot,
ParentHash: b.ParentHash,
Timestamp: b.Timestamp,
ParentHash: event.ParentHash,
Timestamp: event.Timestamp,
BlockNumber: b.Num,
}
if err := p.addLeaf(tx, leafToStore); err != nil {
@@ -460,7 +468,7 @@ func (p *processor) addLeaf(tx kv.RwTx, leaf storeLeaf) error {
return err
}
// store index relation
if err := tx.Put(indexTable, uint32ToBytes(leaf.Index), hash[:]); err != nil {
if err := tx.Put(indexTable, common.Uint32ToBytes(leaf.Index), hash[:]); err != nil {
return err
}
// store root relation
@@ -484,13 +492,3 @@ func uint64ToBytes(num uint64) []byte {
func bytes2Uint64(key []byte) uint64 {
return binary.LittleEndian.Uint64(key)
}

func uint32ToBytes(num uint32) []byte {
key := make([]byte, 4)
binary.LittleEndian.PutUint32(key, num)
return key
}

func bytes2Uint32(key []byte) uint32 {
return binary.LittleEndian.Uint32(key)
}
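
With ProcessBlock now taking a sync.Block, both processors in this commit satisfy the same chain-agnostic contract that the driver in the sync package presumably requires. The real interface definition is in one of the files not expanded here; the sketch below is an assumption based on the methods the processors expose.

// Sketch only: assumed processor contract in github.com/0xPolygon/cdk/sync.
package sync

import "context"

// Block as sketched earlier: block number plus opaque, chain-agnostic events.
type Block struct {
	Num    uint64
	Events []interface{}
}

type processorInterface interface {
	// GetLastProcessedBlock returns the last block number the processor has persisted.
	GetLastProcessedBlock(ctx context.Context) (uint64, error)
	// ProcessBlock stores the events of a block; called with an empty Events slice
	// it simply advances the last-processed-block marker, which is how New seeds
	// the processor when lastProcessedBlock < initialBlock.
	ProcessBlock(block Block) error
	// Reorg deletes all data from firstReorgedBlock (inclusive) onwards.
	Reorg(firstReorgedBlock uint64) error
}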
30 changes: 3 additions & 27 deletions localbridgesync/downloader.go
@@ -32,30 +32,6 @@ type EthClienter interface {
bind.ContractBackend
}

type Bridge struct {
LeafType uint8
OriginNetwork uint32
OriginAddress common.Address
DestinationNetwork uint32
DestinationAddress common.Address
Amount *big.Int
Metadata []byte
DepositCount uint32
}

type Claim struct {
GlobalIndex *big.Int
OriginNetwork uint32
OriginAddress common.Address
DestinationAddress common.Address
Amount *big.Int
}

type BridgeEvent struct {
Bridge *Bridge
Claim *Claim
}

func buildAppender(client EthClienter, bridge common.Address) (sync.LogAppenderMap, error) {
bridgeContractV1, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridge, client)
if err != nil {
@@ -75,7 +51,7 @@ func buildAppender(client EthClienter, bridge common.Address) (sync.LogAppenderM
l, err,
)
}
b.Events = append(b.Events, BridgeEvent{Bridge: &Bridge{
b.Events = append(b.Events, Event{Bridge: &Bridge{
LeafType: bridge.LeafType,
OriginNetwork: bridge.OriginNetwork,
OriginAddress: bridge.OriginAddress,
@@ -96,7 +72,7 @@ func buildAppender(client EthClienter, bridge common.Address) (sync.LogAppenderM
l, err,
)
}
b.Events = append(b.Events, BridgeEvent{Claim: &Claim{
b.Events = append(b.Events, Event{Claim: &Claim{
GlobalIndex: claim.GlobalIndex,
OriginNetwork: claim.OriginNetwork,
OriginAddress: claim.OriginAddress,
@@ -114,7 +90,7 @@ func buildAppender(client EthClienter, bridge common.Address) (sync.LogAppenderM
l, err,
)
}
b.Events = append(b.Events, BridgeEvent{Claim: &Claim{
b.Events = append(b.Events, Event{Claim: &Claim{
GlobalIndex: big.NewInt(int64(claim.Index)),
OriginNetwork: claim.OriginNetwork,
OriginAddress: claim.OriginAddress,
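
The Bridge, Claim and wrapper event types are removed from the downloader; the hunks above now construct Event{Bridge: ...} and Event{Claim: ...}, so an equivalent definition presumably lives elsewhere in the package (likely the processor file, which is not expanded on this page). A sketch consistent with the removed definitions:

// Sketch only: types as implied by the removed definitions and the Event
// literals above; their new location in the package is an assumption.
package localbridgesync

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

type Bridge struct {
	LeafType           uint8
	OriginNetwork      uint32
	OriginAddress      common.Address
	DestinationNetwork uint32
	DestinationAddress common.Address
	Amount             *big.Int
	Metadata           []byte
	DepositCount       uint32
}

type Claim struct {
	GlobalIndex        *big.Int
	OriginNetwork      uint32
	OriginAddress      common.Address
	DestinationAddress common.Address
	Amount             *big.Int
}

// Event wraps either a bridge or a claim; exactly one of the pointers is set.
type Event struct {
	Bridge *Bridge
	Claim  *Claim
}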
14 changes: 14 additions & 0 deletions localbridgesync/localbridgesync.go
@@ -25,17 +25,31 @@ type LocalBridgeSync struct {
}

func New(
ctx context.Context,
dbPath string,
bridge common.Address,
syncBlockChunkSize uint64,
blockFinalityType etherman.BlockNumberFinality,
rd sync.ReorgDetector,
l2Client EthClienter,
initialBlock uint64,
) (*LocalBridgeSync, error) {
processor, err := newProcessor(dbPath)
if err != nil {
return nil, err
}
lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx)
if err != nil {
return nil, err
}
if lastProcessedBlock < initialBlock {
err = processor.ProcessBlock(sync.Block{
Num: initialBlock,
})
if err != nil {
return nil, err
}
}

appender, err := buildAppender(l2Client, bridge)
if err != nil {
(The remaining changed files are not expanded on this page.)
