diff --git a/catchup/catchpointService_test.go b/catchup/catchpointService_test.go index 39cef9f2b9..0487fb975f 100644 --- a/catchup/catchpointService_test.go +++ b/catchup/catchpointService_test.go @@ -81,8 +81,8 @@ func (m *catchpointCatchupAccessorMock) Ledger() (l ledger.CatchupAccessorClient } // GetVerifyData returns the balances hash, spver hash and totals used by VerifyCatchpoint -func (m *catchpointCatchupAccessorMock) GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) { - return crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil +func (m *catchpointCatchupAccessorMock) GetVerifyData(ctx context.Context) (balancesHash, spverHash, onlineAccountsHash, onlineRoundParamsHash crypto.Digest, totals ledgercore.AccountTotals, err error) { + return crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil } // TestCatchpointServicePeerRank ensures CatchpointService does not crash when a block fetched diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go index f0f7c7bff5..eeda7f25ec 100644 --- a/cmd/catchpointdump/file.go +++ b/cmd/catchpointdump/file.go @@ -214,12 +214,13 @@ func loadCatchpointIntoDatabase(ctx context.Context, catchupAccessor ledger.Catc if err != nil { return fileHeader, err } - var balanceHash, spverHash crypto.Digest - balanceHash, spverHash, _, err = catchupAccessor.GetVerifyData(ctx) + var balanceHash, spverHash, onlineAccountsHash, onlineRoundParamsHash crypto.Digest + balanceHash, spverHash, onlineAccountsHash, onlineRoundParamsHash, _, err = catchupAccessor.GetVerifyData(ctx) if err != nil { return fileHeader, err } - fmt.Printf("accounts digest=%s, spver digest=%s\n\n", balanceHash, spverHash) + fmt.Printf("accounts digest=%s, spver digest=%s, onlineaccounts digest=%s onlineroundparams digest=%s\n\n", + balanceHash, spverHash, onlineAccountsHash, onlineRoundParamsHash) } return fileHeader, nil } diff --git a/components/mocks/mockCatchpointCatchupAccessor.go b/components/mocks/mockCatchpointCatchupAccessor.go index edf7946743..1d7355b3b7 100644 --- a/components/mocks/mockCatchpointCatchupAccessor.go +++ b/components/mocks/mockCatchpointCatchupAccessor.go @@ -71,8 +71,8 @@ func (m *MockCatchpointCatchupAccessor) GetCatchupBlockRound(ctx context.Context } // GetVerifyData returns the balances hash, spver hash and totals used by VerifyCatchpoint -func (m *MockCatchpointCatchupAccessor) GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) { - return crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil +func (m *MockCatchpointCatchupAccessor) GetVerifyData(ctx context.Context) (balancesHash, spverHash, onlineAccountsHash, onlineRoundParams crypto.Digest, totals ledgercore.AccountTotals, err error) { + return crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, nil } // VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label. diff --git a/config/consensus.go b/config/consensus.go index b153848230..7e79e9b8e5 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -514,6 +514,10 @@ type ConsensusParams struct { // Version 7 includes state proof verification contexts EnableCatchpointsWithSPContexts bool + // EnableCatchpointsWithOnlineAccounts specifies when to enable version 8 catchpoints. 
+ // Version 8 includes onlineaccounts and onlineroundparams amounts, for historical stake lookups. + EnableCatchpointsWithOnlineAccounts bool + // AppForbidLowResources enforces a rule that prevents apps from accessing // asas and apps below 256, in an effort to decrease the ambiguity of // opcodes that accept IDs or slot indexes. Simultaneously, the first ID @@ -1537,6 +1541,8 @@ func initConsensusProtocols() { vFuture.Heartbeat = true + vFuture.EnableCatchpointsWithOnlineAccounts = true + Consensus[protocol.ConsensusFuture] = vFuture // vAlphaX versions are an separate series of consensus parameters and versions for alphanet diff --git a/ledger/catchpointfileheader.go b/ledger/catchpointfileheader.go index f076f7267c..8fd75df135 100644 --- a/ledger/catchpointfileheader.go +++ b/ledger/catchpointfileheader.go @@ -27,13 +27,15 @@ import ( type CatchpointFileHeader struct { _struct struct{} `codec:",omitempty,omitemptyarray"` - Version uint64 `codec:"version"` - BalancesRound basics.Round `codec:"balancesRound"` - BlocksRound basics.Round `codec:"blocksRound"` - Totals ledgercore.AccountTotals `codec:"accountTotals"` - TotalAccounts uint64 `codec:"accountsCount"` - TotalChunks uint64 `codec:"chunksCount"` - TotalKVs uint64 `codec:"kvsCount"` - Catchpoint string `codec:"catchpoint"` - BlockHeaderDigest crypto.Digest `codec:"blockHeaderDigest"` + Version uint64 `codec:"version"` + BalancesRound basics.Round `codec:"balancesRound"` + BlocksRound basics.Round `codec:"blocksRound"` + Totals ledgercore.AccountTotals `codec:"accountTotals"` + TotalAccounts uint64 `codec:"accountsCount"` + TotalChunks uint64 `codec:"chunksCount"` + TotalKVs uint64 `codec:"kvsCount"` + TotalOnlineAccounts uint64 `codec:"onlineAccountsCount"` + TotalOnlineRoundParams uint64 `codec:"onlineRoundParamsCount"` + Catchpoint string `codec:"catchpoint"` + BlockHeaderDigest crypto.Digest `codec:"blockHeaderDigest"` } diff --git a/ledger/catchpointfilewriter.go b/ledger/catchpointfilewriter.go index cd58606a2f..01e78a59eb 100644 --- a/ledger/catchpointfilewriter.go +++ b/ledger/catchpointfilewriter.go @@ -51,33 +51,29 @@ const ( // the writing is complete. It might take multiple steps until the operation is over, and the caller // has the option of throttling the CPU utilization in between the calls. 
type catchpointFileWriter struct { - ctx context.Context - tx trackerdb.SnapshotScope - filePath string - totalAccounts uint64 - totalKVs uint64 - file *os.File - tar *tar.Writer - compressor io.WriteCloser - chunk catchpointFileChunkV6 - chunkNum uint64 - writtenBytes int64 - biggestChunkLen uint64 - accountsIterator accountsBatchIter - maxResourcesPerChunk int - accountsDone bool - kvRows kvIter -} - -type kvIter interface { - Next() bool - KeyValue() ([]byte, []byte, error) - Close() -} - -type accountsBatchIter interface { - Next(ctx context.Context, accountCount int, resourceCount int) ([]encoded.BalanceRecordV6, uint64, error) - Close() + ctx context.Context + tx trackerdb.SnapshotScope + filePath string + totalAccounts uint64 + totalKVs uint64 + totalOnlineAccounts uint64 + totalOnlineRoundParams uint64 + file *os.File + tar *tar.Writer + compressor io.WriteCloser + chunk catchpointFileChunkV6 + chunkNum uint64 + writtenBytes int64 + biggestChunkLen uint64 + accountsIterator trackerdb.EncodedAccountsBatchIter + maxResourcesPerChunk int + accountsDone bool + kvRows trackerdb.KVsIter + kvDone bool + onlineAccountRows trackerdb.TableIterator[*encoded.OnlineAccountRecordV6] + onlineAccountsDone bool + onlineRoundParamsRows trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6] + onlineRoundParamsDone bool } type catchpointFileBalancesChunkV5 struct { @@ -88,13 +84,15 @@ type catchpointFileBalancesChunkV5 struct { type catchpointFileChunkV6 struct { _struct struct{} `codec:",omitempty,omitemptyarray"` - Balances []encoded.BalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"` - numAccounts uint64 - KVs []encoded.KVRecordV6 `codec:"kv,allocbound=BalancesPerCatchpointFileChunk"` + Balances []encoded.BalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"` + numAccounts uint64 + KVs []encoded.KVRecordV6 `codec:"kv,allocbound=BalancesPerCatchpointFileChunk"` + OnlineAccounts []encoded.OnlineAccountRecordV6 `codec:"oa,allocbound=BalancesPerCatchpointFileChunk"` + OnlineRoundParams []encoded.OnlineRoundParamsRecordV6 `codec:"orp,allocbound=BalancesPerCatchpointFileChunk"` } func (chunk catchpointFileChunkV6) empty() bool { - return len(chunk.Balances) == 0 && len(chunk.KVs) == 0 + return len(chunk.Balances) == 0 && len(chunk.KVs) == 0 && len(chunk.OnlineAccounts) == 0 && len(chunk.OnlineRoundParams) == 0 } type catchpointStateProofVerificationContext struct { @@ -122,6 +120,16 @@ func makeCatchpointFileWriter(ctx context.Context, filePath string, tx trackerdb return nil, err } + totalOnlineAccounts, err := aw.TotalOnlineAccountRows(ctx) + if err != nil { + return nil, err + } + + totalOnlineRoundParams, err := aw.TotalOnlineRoundParams(ctx) + if err != nil { + return nil, err + } + err = os.MkdirAll(filepath.Dir(filePath), 0700) if err != nil { return nil, err @@ -137,16 +145,18 @@ func makeCatchpointFileWriter(ctx context.Context, filePath string, tx trackerdb tar := tar.NewWriter(compressor) res := &catchpointFileWriter{ - ctx: ctx, - tx: tx, - filePath: filePath, - totalAccounts: totalAccounts, - totalKVs: totalKVs, - file: file, - compressor: compressor, - tar: tar, - accountsIterator: tx.MakeEncodedAccoutsBatchIter(), - maxResourcesPerChunk: maxResourcesPerChunk, + ctx: ctx, + tx: tx, + filePath: filePath, + totalAccounts: totalAccounts, + totalKVs: totalKVs, + totalOnlineAccounts: totalOnlineAccounts, + totalOnlineRoundParams: totalOnlineRoundParams, + file: file, + compressor: compressor, + tar: tar, + accountsIterator: 
tx.MakeEncodedAccountsBatchIter(), + maxResourcesPerChunk: maxResourcesPerChunk, } return res, nil } @@ -233,6 +243,14 @@ func (cw *catchpointFileWriter) FileWriteStep(stepCtx context.Context) (more boo cw.kvRows.Close() cw.kvRows = nil } + if cw.onlineAccountRows != nil { + cw.onlineAccountRows.Close() + cw.onlineAccountRows = nil + } + if cw.onlineRoundParamsRows != nil { + cw.onlineRoundParamsRows.Close() + cw.onlineRoundParamsRows = nil + } } }() @@ -323,27 +341,94 @@ func (cw *catchpointFileWriter) readDatabaseStep(ctx context.Context) error { cw.accountsDone = true } - // Create the *Rows iterator JIT - if cw.kvRows == nil { - rows, err := cw.tx.MakeKVsIter(ctx) - if err != nil { - return err + // Create the kvRows iterator JIT + if !cw.kvDone { + if cw.kvRows == nil { + rows, err := cw.tx.MakeKVsIter(ctx) + if err != nil { + return err + } + cw.kvRows = rows + } + + kvrs := make([]encoded.KVRecordV6, 0, BalancesPerCatchpointFileChunk) + for cw.kvRows.Next() { + k, v, err := cw.kvRows.KeyValue() + if err != nil { + return err + } + kvrs = append(kvrs, encoded.KVRecordV6{Key: k, Value: v}) + if len(kvrs) == BalancesPerCatchpointFileChunk { + break + } + } + if len(kvrs) > 0 { + cw.chunk = catchpointFileChunkV6{KVs: kvrs} + return nil } - cw.kvRows = rows + // Do not close kvRows here, or it will start over on the next iteration + cw.kvDone = true } - kvrs := make([]encoded.KVRecordV6, 0, BalancesPerCatchpointFileChunk) - for cw.kvRows.Next() { - k, v, err := cw.kvRows.KeyValue() - if err != nil { - return err + if !cw.onlineAccountsDone { + // Create the OnlineAccounts iterator JIT + if cw.onlineAccountRows == nil { + rows, err := cw.tx.MakeOnlineAccountsIter(ctx) + if err != nil { + return err + } + cw.onlineAccountRows = rows } - kvrs = append(kvrs, encoded.KVRecordV6{Key: k, Value: v}) - if len(kvrs) == BalancesPerCatchpointFileChunk { - break + + onlineAccts := make([]encoded.OnlineAccountRecordV6, 0, BalancesPerCatchpointFileChunk) + for cw.onlineAccountRows.Next() { + oa, err := cw.onlineAccountRows.GetItem() + if err != nil { + return err + } + onlineAccts = append(onlineAccts, *oa) + if len(onlineAccts) == BalancesPerCatchpointFileChunk { + break + } + } + if len(onlineAccts) > 0 { + cw.chunk = catchpointFileChunkV6{OnlineAccounts: onlineAccts} + return nil + } + // Do not close onlineAccountRows here, or it will start over on the next iteration + cw.onlineAccountsDone = true + } + + if !cw.onlineRoundParamsDone { + // Create the OnlineRoundParams iterator JIT + if cw.onlineRoundParamsRows == nil { + rows, err := cw.tx.MakeOnlineRoundParamsIter(ctx) + if err != nil { + return err + } + cw.onlineRoundParamsRows = rows } + + onlineRndParams := make([]encoded.OnlineRoundParamsRecordV6, 0, BalancesPerCatchpointFileChunk) + for cw.onlineRoundParamsRows.Next() { + or, err := cw.onlineRoundParamsRows.GetItem() + if err != nil { + return err + } + onlineRndParams = append(onlineRndParams, *or) + if len(onlineRndParams) == BalancesPerCatchpointFileChunk { + break + } + } + if len(onlineRndParams) > 0 { + cw.chunk = catchpointFileChunkV6{OnlineRoundParams: onlineRndParams} + return nil + } + // Do not close onlineRndParamsRows here, or it will start over on the next iteration + cw.onlineRoundParamsDone = true } - cw.chunk = catchpointFileChunkV6{KVs: kvrs} + + // Finished the last chunk return nil } diff --git a/ledger/catchpointfilewriter_test.go b/ledger/catchpointfilewriter_test.go index 499adeedc8..553942ad0b 100644 --- a/ledger/catchpointfilewriter_test.go +++ 
b/ledger/catchpointfilewriter_test.go @@ -22,6 +22,7 @@ import ( "compress/gzip" "context" "database/sql" + "encoding/binary" "fmt" "io" "os" @@ -297,8 +298,7 @@ func TestBasicCatchpointWriter(t *testing.T) { } func testWriteCatchpoint(t *testing.T, rdb trackerdb.Store, datapath string, filepath string, maxResourcesPerChunk int) CatchpointFileHeader { - var totalAccounts uint64 - var totalChunks uint64 + var totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks uint64 var biggestChunkLen uint64 var accountsRnd basics.Round var totals ledgercore.AccountTotals @@ -333,6 +333,9 @@ func testWriteCatchpoint(t *testing.T, rdb trackerdb.Store, datapath string, fil } } totalAccounts = writer.totalAccounts + totalKVs = writer.totalKVs + totalOnlineAccounts = writer.totalOnlineAccounts + totalOnlineRoundParams = writer.totalOnlineRoundParams totalChunks = writer.chunkNum biggestChunkLen = writer.biggestChunkLen accountsRnd, err = ar.AccountsRound() @@ -347,14 +350,17 @@ func testWriteCatchpoint(t *testing.T, rdb trackerdb.Store, datapath string, fil blockHeaderDigest := crypto.Hash([]byte{1, 2, 3}) catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test catchpointFileHeader := CatchpointFileHeader{ - Version: CatchpointFileVersionV7, - BalancesRound: accountsRnd, - BlocksRound: blocksRound, - Totals: totals, - TotalAccounts: totalAccounts, - TotalChunks: totalChunks, - Catchpoint: catchpointLabel, - BlockHeaderDigest: blockHeaderDigest, + Version: CatchpointFileVersionV8, + BalancesRound: accountsRnd, + BlocksRound: blocksRound, + Totals: totals, + TotalAccounts: totalAccounts, + TotalKVs: totalKVs, + TotalOnlineAccounts: totalOnlineAccounts, + TotalOnlineRoundParams: totalOnlineRoundParams, + TotalChunks: totalChunks, + Catchpoint: catchpointLabel, + BlockHeaderDigest: blockHeaderDigest, } err = repackCatchpoint( context.Background(), catchpointFileHeader, biggestChunkLen, @@ -611,8 +617,10 @@ func TestFullCatchpointWriterOverflowAccounts(t *testing.T) { require.Equal(t, basics.Round(0), validThrough) } - err = l.reloadLedger() - require.NoError(t, err) + // TODO: uncomment if we want to test re-initializing the ledger fully + // currently this doesn't work, because reloadLedger runs migrations like txtail that require a working block DB + //err = l.reloadLedger() + //require.NoError(t, err) // now manually construct the MT and ensure the reading makeOrderedAccountsIter works as expected: // no errors on read, hashes match @@ -697,24 +705,41 @@ func testNewLedgerFromCatchpoint(t *testing.T, catchpointWriterReadAccess tracke var catchupProgress CatchpointCatchupAccessorProgress catchpointContent := readCatchpointFile(t, filepath) + var balancesRound basics.Round for _, catchpointData := range catchpointContent { + // get BalancesRound from header and use it to set the DB round + if catchpointData.headerName == CatchpointContentFileName { + var fileheader CatchpointFileHeader + err = protocol.Decode(catchpointData.data, &fileheader) + require.NoError(t, err) + balancesRound = fileheader.BalancesRound + } err = accessor.ProcessStagingBalances(context.Background(), catchpointData.headerName, catchpointData.data, &catchupProgress) require.NoError(t, err) } + require.NotZero(t, balancesRound, "no balances round found in test catchpoint file") + + // TODO: uncomment if we want to test re-initializing the ledger fully, by setting the balances round (DB round) + // for use by 
the trackers and migrations. However the txtail migration requires a working block DB, which most + // of these catchpoint tests don't copy over when saving/restoring. + // + // // Manually set the balances round. In regular catchpoint restore, this is set by StoreBalancesRound + // // when the first block is downloaded. + // err = l.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { + // crw, err := tx.MakeCatchpointWriter() + // require.NoError(t, err) + + // err = crw.WriteCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBalancesRound, uint64(balancesRound)) + // require.NoError(t, err) + // return nil + // }) + // require.NoError(t, err) err = accessor.BuildMerkleTrie(context.Background(), nil) require.NoError(t, err) - resetAccountDBToV6(t, l) - - err = l.trackerDBs.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { - cw, err := tx.MakeCatchpointWriter() - if err != nil { - return err - } - - return cw.ApplyCatchpointStagingBalances(ctx, 0, 0) - }) + // Initializes DB, runs migrations, runs ApplyCatchpointStagingBalances + err = accessor.(*catchpointCatchupAccessorImpl).finishBalances(context.Background()) require.NoError(t, err) balanceTrieStats := func(db trackerdb.Store) merkletrie.Stats { @@ -790,6 +815,20 @@ func TestFullCatchpointWriter(t *testing.T) { } } +// ensure both committed all pending changes before taking a catchpoint +// another approach is to modify the test and craft round numbers, +// and make the ledger to generate catchpoint itself when it is time +func testCatchpointFlushRound(l *Ledger) { + // Clear the timer to ensure a flush + l.trackers.mu.Lock() + l.trackers.lastFlushTime = time.Time{} + l.trackers.mu.Unlock() + + r, _ := l.LatestCommitted() + l.trackers.committedUpTo(r) + l.trackers.waitAccountsWriting() +} + func TestExactAccountChunk(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -821,21 +860,8 @@ func TestExactAccountChunk(t *testing.T) { dl.fullBlock(&selfpay) } - // ensure both committed all pending changes before taking a catchpoint - // another approach is to modify the test and craft round numbers, - // and make the ledger to generate catchpoint itself when it is time - flushRound := func(l *Ledger) { - // Clear the timer to ensure a flush - l.trackers.mu.Lock() - l.trackers.lastFlushTime = time.Time{} - l.trackers.mu.Unlock() - - r, _ := l.LatestCommitted() - l.trackers.committedUpTo(r) - l.trackers.waitAccountsWriting() - } - flushRound(dl.generator) - flushRound(dl.validator) + testCatchpointFlushRound(dl.generator) + testCatchpointFlushRound(dl.validator) require.Eventually(t, func() bool { dl.generator.accts.accountsMu.RLock() @@ -854,7 +880,7 @@ func TestExactAccountChunk(t *testing.T) { catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") cph := testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, cph.TotalChunks, 1) + require.EqualValues(t, cph.TotalChunks, 2) l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB(), catchpointFilePath) defer l.Close() @@ -906,7 +932,7 @@ func TestCatchpointAfterTxns(t *testing.T) { catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") cph := testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, 2, cph.TotalChunks) + require.EqualValues(t, 3, cph.TotalChunks) l := testNewLedgerFromCatchpoint(t, dl.validator.trackerDB(), 
catchpointFilePath) defer l.Close() @@ -922,7 +948,7 @@ func TestCatchpointAfterTxns(t *testing.T) { // Write and read back in, and ensure even the last effect exists. cph = testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, cph.TotalChunks, 2) // Still only 2 chunks, as last was in a recent block + require.EqualValues(t, cph.TotalChunks, 3) // Still only 3 chunks, as last was in a recent block // Drive home the point that `last` is _not_ included in the catchpoint by inspecting balance read from catchpoint. { @@ -938,7 +964,7 @@ func TestCatchpointAfterTxns(t *testing.T) { } cph = testWriteCatchpoint(t, dl.validator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, cph.TotalChunks, 3) + require.EqualValues(t, cph.TotalChunks, 4) l = testNewLedgerFromCatchpoint(t, dl.validator.trackerDB(), catchpointFilePath) defer l.Close() @@ -959,6 +985,169 @@ func TestCatchpointAfterTxns(t *testing.T) { } } +func TestCatchpointAfterStakeLookupTxns(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + + genBalances, addrs, _ := ledgertesting.NewTestGenesis(func(cfg *ledgertesting.GenesisCfg) { + cfg.OnlineCount = 1 + ledgertesting.TurnOffRewards(cfg) + }) + cfg := config.GetDefaultLocal() + dl := NewDoubleLedger(t, genBalances, protocol.ConsensusFuture, cfg, simpleLedgerOnDisk()) + defer dl.Close() + + initialStake := uint64(833333333333333) + expectedStake := initialStake + stakeAppSource := main(` +// ensure total online stake matches arg 0 +txn ApplicationArgs 0 +btoi +online_stake +== +assert +// ensure stake for accounts 1 (the only online account) matches arg 0 +txn Accounts 1 +voter_params_get VoterBalance +pop +txn ApplicationArgs 0 +btoi +== +assert +`) + // uses block 1 and 2 + stakeApp := dl.fundedApp(addrs[1], 1_000_000, stakeAppSource) + + // starting with block 3, make an app call and a pay in each block + callStakeApp := func(assertStake uint64) []*txntest.Txn { + stakebuf := make([]byte, 8) + binary.BigEndian.PutUint64(stakebuf, assertStake) + return []*txntest.Txn{ + // assert stake from 320 rounds ago + txntest.Txn{ + Type: "appl", + Sender: addrs[2], + ApplicationID: stakeApp, + Note: ledgertesting.RandomNote(), + Accounts: []basics.Address{addrs[0]}, + }.Args(string(stakebuf)), + // pay 1 microalgo to the only online account (takes effect in 320 rounds) + { + Type: "pay", + Sender: addrs[1], + Receiver: addrs[0], + Amount: 1, + }} + } + + // adds block 3 + vb := dl.fullBlock(callStakeApp(expectedStake)...) + require.Equal(t, vb.Block().Round(), basics.Round(3)) + require.Empty(t, vb.Block().ExpiredParticipationAccounts) + require.Empty(t, vb.Block().AbsentParticipationAccounts) + + // add blocks until round 322, after which stake will go up by 1 each round + for ; vb.Block().Round() < 322; vb = dl.fullBlock(callStakeApp(expectedStake)...) { + require.Empty(t, vb.Block().ExpiredParticipationAccounts) + require.Empty(t, vb.Block().AbsentParticipationAccounts) + + nextRnd := vb.Block().Round() + 1 + stake, err := dl.generator.OnlineCirculation(nextRnd.SubSaturate(320), nextRnd) + require.NoError(t, err) + require.Equal(t, expectedStake, stake.Raw) + } + require.Equal(t, vb.Block().Round(), basics.Round(322)) + + for vb.Block().Round() <= 1500 { + expectedStake++ // add 1 microalgo to the expected stake for the next block + + // the online_stake opcode in block 323 will look up OnlineCirculation(3, 323). 
+ nextRnd := vb.Block().Round() + 1 + stake, err := dl.generator.OnlineCirculation(nextRnd.SubSaturate(320), nextRnd) + require.NoError(t, err) + require.Equal(t, expectedStake, stake.Raw) + + // build a new block for nextRnd, asserting online stake for nextRnd-320 + vb = dl.fullBlock(callStakeApp(expectedStake)...) + require.Empty(t, vb.Block().ExpiredParticipationAccounts) + require.Empty(t, vb.Block().AbsentParticipationAccounts) + } + + // wait for tracker to flush + testCatchpointFlushRound(dl.generator) + testCatchpointFlushRound(dl.validator) + + // ensure flush and latest round all were OK + genDBRound := dl.generator.LatestTrackerCommitted() + valDBRound := dl.validator.LatestTrackerCommitted() + require.NotZero(t, genDBRound) + require.NotZero(t, valDBRound) + require.Equal(t, genDBRound, valDBRound) + require.Equal(t, 1497, int(genDBRound)) + genLatestRound := dl.generator.Latest() + valLatestRound := dl.validator.Latest() + require.NotZero(t, genLatestRound) + require.NotZero(t, valLatestRound) + require.Equal(t, genLatestRound, valLatestRound) + // latest should be 4 rounds ahead of DB round + require.Equal(t, genDBRound+basics.Round(cfg.MaxAcctLookback), genLatestRound) + + t.Log("DB round generator", genDBRound, "validator", valDBRound) + t.Log("Latest round generator", genLatestRound, "validator", valLatestRound) + + genOAHash, genOARows, err := calculateVerificationHash(context.Background(), dl.generator.trackerDB().MakeOnlineAccountsIter) + require.NoError(t, err) + valOAHash, valOARows, err := calculateVerificationHash(context.Background(), dl.validator.trackerDB().MakeOnlineAccountsIter) + require.NoError(t, err) + require.Equal(t, genOAHash, valOAHash) + require.NotZero(t, genOAHash) + require.Equal(t, genOARows, valOARows) + require.NotZero(t, genOARows) + + genORPHash, genORPRows, err := calculateVerificationHash(context.Background(), dl.generator.trackerDB().MakeOnlineRoundParamsIter) + require.NoError(t, err) + valORPHash, valORPRows, err := calculateVerificationHash(context.Background(), dl.validator.trackerDB().MakeOnlineRoundParamsIter) + require.NoError(t, err) + require.Equal(t, genORPHash, valORPHash) + require.NotZero(t, genORPHash) + require.Equal(t, genORPRows, valORPRows) + require.NotZero(t, genORPRows) + + tempDir := t.TempDir() + catchpointDataFilePath := filepath.Join(tempDir, t.Name()+".data") + catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") + + cph := testWriteCatchpoint(t, dl.generator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) + require.EqualValues(t, 7, cph.TotalChunks) + + l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB(), catchpointFilePath) + defer l.Close() + + catchpointOAHash, catchpointOARows, err := calculateVerificationHash(context.Background(), l.trackerDBs.MakeOnlineAccountsIter) + require.NoError(t, err) + require.Equal(t, genOAHash, catchpointOAHash) + t.Log("catchpoint onlineaccounts hash", catchpointOAHash, "matches") + require.Equal(t, genOARows, catchpointOARows) + + catchpointORPHash, catchpointORPRows, err := calculateVerificationHash(context.Background(), l.trackerDBs.MakeOnlineRoundParamsIter) + require.NoError(t, err) + require.Equal(t, genORPHash, catchpointORPHash) + t.Log("catchpoint onlineroundparams hash", catchpointORPHash, "matches") + require.Equal(t, genORPRows, catchpointORPRows) + + oar, err := l.trackerDBs.MakeOnlineAccountsOptimizedReader() + require.NoError(t, err) + + for i := genDBRound; i >= (genDBRound - 1000); i-- { + oad, err := 
oar.LookupOnline(addrs[0], basics.Round(i)) + require.NoError(t, err) + // block 3 started paying 1 microalgo to addrs[0] per round + expected := initialStake + uint64(i) - 2 + require.Equal(t, expected, oad.AccountData.MicroAlgos.Raw) + } + +} + // Exercises a sequence of box modifications that caused a bug in // catchpoint writes. // @@ -1028,7 +1217,7 @@ func TestCatchpointAfterBoxTxns(t *testing.T) { catchpointFilePath := filepath.Join(tempDir, t.Name()+".catchpoint.tar.gz") cph := testWriteCatchpoint(t, dl.generator.trackerDB(), catchpointDataFilePath, catchpointFilePath, 0) - require.EqualValues(t, 2, cph.TotalChunks) + require.EqualValues(t, 3, cph.TotalChunks) l := testNewLedgerFromCatchpoint(t, dl.generator.trackerDB(), catchpointFilePath) defer l.Close() diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go index df7772de53..8adbdc8dfb 100644 --- a/ledger/catchpointtracker.go +++ b/ledger/catchpointtracker.go @@ -64,6 +64,10 @@ const ( // CatchpointFileVersionV7 is the catchpoint file version that is matching database schema V10. // This version introduced state proof verification data and versioning for CatchpointLabel. CatchpointFileVersionV7 = uint64(0202) + // CatchpointFileVersionV8 is the catchpoint file version that includes V6 and V7 data, as well + // as historical onlineaccounts and onlineroundparamstail table data (added in DB version V7, + // but until this version initialized with current round data, not 320 rounds of historical info). + CatchpointFileVersionV8 = uint64(0203) // CatchpointContentFileName is a name of a file with catchpoint header info inside tar archive CatchpointContentFileName = "content.msgpack" @@ -212,13 +216,13 @@ func (ct *catchpointTracker) getSPVerificationData() (encodedData []byte, spVeri func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basics.Round, blockProto protocol.ConsensusVersion, updatingBalancesDuration time.Duration) error { ct.log.Infof("finishing catchpoint's first stage dbRound: %d", dbRound) - var totalKVs uint64 - var totalAccounts uint64 + var totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams uint64 var totalChunks uint64 var biggestChunkLen uint64 var spVerificationHash crypto.Digest var spVerificationEncodedData []byte var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails + var onlineAccountsHash, onlineRoundParamsHash crypto.Digest params := config.Consensus[blockProto] if params.EnableCatchpointsWithSPContexts { @@ -230,6 +234,26 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic return err } } + if params.EnableCatchpointsWithOnlineAccounts { + // Generate hashes of the onlineaccounts and onlineroundparams tables. + err := ct.dbs.Snapshot(func(ctx context.Context, tx trackerdb.SnapshotScope) error { + var dbErr error + onlineAccountsHash, _, dbErr = calculateVerificationHash(ctx, tx.MakeOnlineAccountsIter) + if dbErr != nil { + return dbErr + + } + + onlineRoundParamsHash, _, dbErr = calculateVerificationHash(ctx, tx.MakeOnlineRoundParamsIter) + if dbErr != nil { + return dbErr + } + return nil + }) + if err != nil { + return err + } + } if ct.enableGeneratingCatchpointFiles { // Generate the catchpoint file. 
This is done inline so that it will @@ -239,7 +263,7 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic var err error catchpointGenerationStats.BalancesWriteTime = uint64(updatingBalancesDuration.Nanoseconds()) - totalKVs, totalAccounts, totalChunks, biggestChunkLen, err = ct.generateCatchpointData( + totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks, biggestChunkLen, err = ct.generateCatchpointData( ctx, dbRound, &catchpointGenerationStats, spVerificationEncodedData) ct.catchpointDataWriting.Store(0) if err != nil { @@ -253,7 +277,9 @@ func (ct *catchpointTracker) finishFirstStage(ctx context.Context, dbRound basic return err } - err = ct.recordFirstStageInfo(ctx, tx, &catchpointGenerationStats, dbRound, totalKVs, totalAccounts, totalChunks, biggestChunkLen, spVerificationHash) + err = ct.recordFirstStageInfo(ctx, tx, &catchpointGenerationStats, dbRound, + totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks, biggestChunkLen, + spVerificationHash, onlineAccountsHash, onlineRoundParamsHash) if err != nil { return err } @@ -764,8 +790,14 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound var labelMaker ledgercore.CatchpointLabelMaker var version uint64 params := config.Consensus[blockProto] - if params.EnableCatchpointsWithSPContexts { - labelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals, &dataInfo.StateProofVerificationHash) + if params.EnableCatchpointsWithOnlineAccounts { + if !params.EnableCatchpointsWithSPContexts { + return fmt.Errorf("invalid params for catchpoint file version v8: SP contexts not enabled") + } + labelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals, &dataInfo.StateProofVerificationHash, &dataInfo.OnlineAccountsHash, &dataInfo.OnlineRoundParamsHash) + version = CatchpointFileVersionV8 + } else if params.EnableCatchpointsWithSPContexts { + labelMaker = ledgercore.MakeCatchpointLabelMakerV7(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals, &dataInfo.StateProofVerificationHash) version = CatchpointFileVersionV7 } else { labelMaker = ledgercore.MakeCatchpointLabelMakerV6(round, &blockHash, &dataInfo.TrieBalancesHash, dataInfo.Totals) @@ -806,15 +838,17 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound // Make a catchpoint file. header := CatchpointFileHeader{ - Version: version, - BalancesRound: accountsRound, - BlocksRound: round, - Totals: dataInfo.Totals, - TotalAccounts: dataInfo.TotalAccounts, - TotalKVs: dataInfo.TotalKVs, - TotalChunks: dataInfo.TotalChunks, - Catchpoint: label, - BlockHeaderDigest: blockHash, + Version: version, + BalancesRound: accountsRound, + BlocksRound: round, + Totals: dataInfo.Totals, + TotalAccounts: dataInfo.TotalAccounts, + TotalKVs: dataInfo.TotalKVs, + TotalOnlineAccounts: dataInfo.TotalOnlineAccounts, + TotalOnlineRoundParams: dataInfo.TotalOnlineRoundParams, + TotalChunks: dataInfo.TotalChunks, + Catchpoint: label, + BlockHeaderDigest: blockHash, } relCatchpointFilePath := filepath.Join(trackerdb.CatchpointDirName, trackerdb.MakeCatchpointFilePath(round)) @@ -855,6 +889,8 @@ func (ct *catchpointTracker) createCatchpoint(ctx context.Context, accountsRound With("writingDuration", uint64(time.Since(startTime).Nanoseconds())). With("accountsCount", dataInfo.TotalAccounts). With("kvsCount", dataInfo.TotalKVs). 
+ With("onlineAccountsCount", dataInfo.TotalOnlineAccounts). + With("onlineRoundParamsCount", dataInfo.TotalOnlineRoundParams). With("fileSize", fileInfo.Size()). With("filepath", relCatchpointFilePath). With("catchpointLabel", label). @@ -1173,7 +1209,7 @@ func (ct *catchpointTracker) isWritingCatchpointDataFile() bool { // - Balance and KV chunk (named balances.x.msgpack). // ... // - Balance and KV chunk (named balances.x.msgpack). -func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, encodedSPData []byte) (totalKVs, totalAccounts, totalChunks, biggestChunkLen uint64, err error) { +func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, accountsRound basics.Round, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, encodedSPData []byte) (totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks, biggestChunkLen uint64, err error) { ct.log.Debugf("catchpointTracker.generateCatchpointData() writing catchpoint accounts for round %d", accountsRound) startTime := time.Now() @@ -1261,19 +1297,25 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account ledgerGeneratecatchpointMicros.AddMicrosecondsSince(start, nil) if err != nil { ct.log.Warnf("catchpointTracker.generateCatchpointData() %v", err) - return 0, 0, 0, 0, err + return 0, 0, 0, 0, 0, 0, err } catchpointGenerationStats.FileSize = uint64(catchpointWriter.writtenBytes) catchpointGenerationStats.WritingDuration = uint64(time.Since(startTime).Nanoseconds()) catchpointGenerationStats.AccountsCount = catchpointWriter.totalAccounts catchpointGenerationStats.KVsCount = catchpointWriter.totalKVs + catchpointGenerationStats.OnlineAccountsCount = catchpointWriter.totalOnlineAccounts + catchpointGenerationStats.OnlineRoundParamsCount = catchpointWriter.totalOnlineRoundParams catchpointGenerationStats.AccountsRound = uint64(accountsRound) - return catchpointWriter.totalKVs, catchpointWriter.totalAccounts, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil + return catchpointWriter.totalAccounts, catchpointWriter.totalKVs, catchpointWriter.totalOnlineAccounts, catchpointWriter.totalOnlineRoundParams, catchpointWriter.chunkNum, catchpointWriter.biggestChunkLen, nil } -func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx trackerdb.TransactionScope, catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, accountsRound basics.Round, totalKVs uint64, totalAccounts uint64, totalChunks uint64, biggestChunkLen uint64, stateProofVerificationHash crypto.Digest) error { +func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx trackerdb.TransactionScope, + catchpointGenerationStats *telemetryspec.CatchpointGenerationEventDetails, + accountsRound basics.Round, + totalAccounts, totalKVs, totalOnlineAccounts, totalOnlineRoundParams, totalChunks, biggestChunkLen uint64, + stateProofVerificationHash, onlineAccountsVerificationHash, onlineRoundParamsVerificationHash crypto.Digest) error { ar, err := tx.MakeAccountsReader() if err != nil { return err @@ -1316,10 +1358,14 @@ func (ct *catchpointTracker) recordFirstStageInfo(ctx context.Context, tx tracke Totals: accountTotals, TotalAccounts: totalAccounts, TotalKVs: totalKVs, + TotalOnlineAccounts: totalOnlineAccounts, + TotalOnlineRoundParams: totalOnlineRoundParams, TotalChunks: totalChunks, BiggestChunkLen: biggestChunkLen, 
TrieBalancesHash: trieBalancesHash, StateProofVerificationHash: stateProofVerificationHash, + OnlineAccountsHash: onlineAccountsVerificationHash, + OnlineRoundParamsHash: onlineRoundParamsVerificationHash, } err = cw.InsertOrReplaceCatchpointFirstStageInfo(ctx, accountsRound, &info) diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index da2408946b..ff84cabcb3 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -363,7 +363,7 @@ func createCatchpoint(t *testing.T, ct *catchpointTracker, accountsRound basics. require.NoError(t, err) var catchpointGenerationStats telemetryspec.CatchpointGenerationEventDetails - _, _, _, biggestChunkLen, err := ct.generateCatchpointData( + _, _, _, _, _, biggestChunkLen, err := ct.generateCatchpointData( context.Background(), accountsRound, &catchpointGenerationStats, spVerificationEncodedData) require.NoError(t, err) @@ -1882,10 +1882,6 @@ func TestHashContract(t *testing.T) { func TestCatchpointFastUpdates(t *testing.T) { partitiontest.PartitionTest(t) - if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { - t.Skip("This test is too slow on ARM and causes CI builds to time out") - } - proto := config.Consensus[protocol.ConsensusFuture] accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)} @@ -1925,6 +1921,7 @@ func TestCatchpointFastUpdates(t *testing.T) { wg := sync.WaitGroup{} + lastRound := basics.Round(0) for i := basics.Round(initialBlocksCount); i < basics.Round(proto.CatchpointLookback+15); i++ { rewardLevelDelta := crypto.RandUint64() % 5 rewardLevel += rewardLevelDelta @@ -1959,10 +1956,21 @@ func TestCatchpointFastUpdates(t *testing.T) { defer wg.Done() ml.trackers.committedUpTo(round) }(i) + lastRound = i } wg.Wait() ml.trackers.waitAccountsWriting() + for ml.trackers.getDbRound() <= basics.Round(proto.CatchpointLookback) { + // db round stuck <= 320? likely committedUpTo dropped some commit tasks, due to deferredCommits channel full + // so give it another try + ml.trackers.committedUpTo(lastRound) + require.Eventually(t, func() bool { + //ml.trackers.waitAccountsWriting() + return ml.trackers.getDbRound() > basics.Round(proto.CatchpointLookback) + }, 5*time.Second, 100*time.Millisecond) + } + require.NotEmpty(t, ct.GetLastCatchpointLabel()) } diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index 315fa6b003..de10baa0e1 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -69,7 +69,7 @@ type CatchpointCatchupAccessor interface { GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error) // GetVerifyData returns the balances hash, spver hash and totals used by VerifyCatchpoint - GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) + GetVerifyData(ctx context.Context) (balancesHash, spverHash, onlineAccountsHash, onlineRoundParamsHash crypto.Digest, totals ledgercore.AccountTotals, err error) // VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label. 
VerifyCatchpoint(ctx context.Context, blk *bookkeeping.Block) (err error) @@ -103,6 +103,8 @@ type stagingWriter interface { writeCreatables(context.Context, []trackerdb.NormalizedAccountBalance) error writeHashes(context.Context, []trackerdb.NormalizedAccountBalance) error writeKVs(context.Context, []encoded.KVRecordV6) error + writeOnlineAccounts(context.Context, []encoded.OnlineAccountRecordV6) error + writeOnlineRoundParams(context.Context, []encoded.OnlineRoundParamsRecordV6) error isShared() bool } @@ -165,6 +167,26 @@ func (w *stagingWriterImpl) writeKVs(ctx context.Context, kvrs []encoded.KVRecor }) } +func (w *stagingWriterImpl) writeOnlineAccounts(ctx context.Context, accts []encoded.OnlineAccountRecordV6) error { + return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { + crw, err := tx.MakeCatchpointReaderWriter() + if err != nil { + return err + } + return crw.WriteCatchpointStagingOnlineAccounts(ctx, accts) + }) +} + +func (w *stagingWriterImpl) writeOnlineRoundParams(ctx context.Context, params []encoded.OnlineRoundParamsRecordV6) error { + return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { + crw, err := tx.MakeCatchpointReaderWriter() + if err != nil { + return err + } + return crw.WriteCatchpointStagingOnlineRoundParams(ctx, params) + }) +} + func (w *stagingWriterImpl) writeCreatables(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { return w.wdb.Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) error { crw, err := tx.MakeCatchpointReaderWriter() @@ -346,24 +368,30 @@ func (c *catchpointCatchupAccessorImpl) ResetStagingBalances(ctx context.Context // CatchpointCatchupAccessorProgress is used by the caller of ProcessStagingBalances to obtain progress information type CatchpointCatchupAccessorProgress struct { - TotalAccounts uint64 - ProcessedAccounts uint64 - ProcessedBytes uint64 - TotalKVs uint64 - ProcessedKVs uint64 - TotalChunks uint64 - SeenHeader bool - Version uint64 - TotalAccountHashes uint64 + TotalAccounts uint64 + ProcessedAccounts uint64 + ProcessedBytes uint64 + TotalKVs uint64 + ProcessedKVs uint64 + TotalOnlineAccounts uint64 + ProcessedOnlineAccounts uint64 + TotalOnlineRoundParams uint64 + ProcessedOnlineRoundParams uint64 + TotalChunks uint64 + SeenHeader bool + Version uint64 + TotalAccountHashes uint64 // Having the cachedTrie here would help to accelerate the catchup process since the trie maintain an internal cache of nodes. // While rebuilding the trie, we don't want to force and reload (some) of these nodes into the cache for each catchpoint file chunk. 
cachedTrie *merkletrie.Trie - BalancesWriteDuration time.Duration - CreatablesWriteDuration time.Duration - HashesWriteDuration time.Duration - KVWriteDuration time.Duration + BalancesWriteDuration time.Duration + CreatablesWriteDuration time.Duration + HashesWriteDuration time.Duration + KVWriteDuration time.Duration + OnlineAccountsWriteDuration time.Duration + OnlineRoundParamsWriteDuration time.Duration } // ProcessStagingBalances deserialize the given bytes as a temporary staging balances @@ -418,6 +446,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex case CatchpointFileVersionV5: case CatchpointFileVersionV6: case CatchpointFileVersionV7: + case CatchpointFileVersionV8: + default: return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingContent: unable to process catchpoint - version %d is not supported", fileHeader.Version) } @@ -459,6 +489,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingContent(ctx context.Contex progress.SeenHeader = true progress.TotalAccounts = fileHeader.TotalAccounts progress.TotalKVs = fileHeader.TotalKVs + progress.TotalOnlineAccounts = fileHeader.TotalOnlineAccounts + progress.TotalOnlineRoundParams = fileHeader.TotalOnlineRoundParams progress.TotalChunks = fileHeader.TotalChunks progress.Version = fileHeader.Version @@ -480,6 +512,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte var normalizedAccountBalances []trackerdb.NormalizedAccountBalance var expectingMoreEntries []bool var chunkKVs []encoded.KVRecordV6 + var chunkOnlineAccounts []encoded.OnlineAccountRecordV6 + var chunkOnlineRoundParams []encoded.OnlineRoundParamsRecordV6 switch progress.Version { default: @@ -501,16 +535,21 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte expectingMoreEntries = make([]bool, len(balances.Balances)) case CatchpointFileVersionV6: + // V6 split accounts from resources; later, KVs were added to the v6 chunk format fallthrough case CatchpointFileVersionV7: + // V7 added state proof verification data + hash, but left v6 chunk format unchanged + fallthrough + case CatchpointFileVersionV8: + // V8 added online accounts and online round params data + hashes, and added them to the v6 chunk format var chunk catchpointFileChunkV6 err = protocol.Decode(bytes, &chunk) if err != nil { return err } - if len(chunk.Balances) == 0 && len(chunk.KVs) == 0 { - return fmt.Errorf("processStagingBalances received a chunk with no accounts or KVs") + if chunk.empty() { + return fmt.Errorf("processStagingBalances received an empty chunk") } normalizedAccountBalances, err = prepareNormalizedBalancesV6(chunk.Balances, c.ledger.GenesisProto()) @@ -519,6 +558,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte expectingMoreEntries[i] = balance.ExpectingMoreEntries } chunkKVs = chunk.KVs + chunkOnlineAccounts = chunk.OnlineAccounts + chunkOnlineRoundParams = chunk.OnlineRoundParams } if err != nil { @@ -594,14 +635,8 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte wg := sync.WaitGroup{} - var errBalances error - var errCreatables error - var errHashes error - var errKVs error - var durBalances time.Duration - var durCreatables time.Duration - var durHashes time.Duration - var durKVs time.Duration + var errBalances, errCreatables, errHashes, errKVs, errOnlineAccounts, errOnlineRoundParams error + var durBalances, durCreatables, durHashes, durKVs, durOnlineAccounts, durOnlineRoundParams time.Duration // 
start the balances writer wg.Add(1) @@ -666,6 +701,26 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte durKVs = time.Since(writeKVsStart) }() + // start the online accounts writer + wg.Add(1) + go func() { + defer wg.Done() + + writeOnlineAccountsStart := time.Now() + errOnlineAccounts = c.stagingWriter.writeOnlineAccounts(ctx, chunkOnlineAccounts) + durOnlineAccounts = time.Since(writeOnlineAccountsStart) + }() + + // start the rounds params writer + wg.Add(1) + go func() { + defer wg.Done() + + writeOnlineRoundParamsStart := time.Now() + errOnlineRoundParams = c.stagingWriter.writeOnlineRoundParams(ctx, chunkOnlineRoundParams) + durOnlineRoundParams = time.Since(writeOnlineRoundParamsStart) + }() + wg.Wait() if errBalances != nil { @@ -680,15 +735,25 @@ func (c *catchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte if errKVs != nil { return errKVs } + if errOnlineAccounts != nil { + return errOnlineAccounts + } + if errOnlineRoundParams != nil { + return errOnlineRoundParams + } progress.BalancesWriteDuration += durBalances progress.CreatablesWriteDuration += durCreatables progress.HashesWriteDuration += durHashes progress.KVWriteDuration += durKVs + progress.OnlineAccountsWriteDuration += durOnlineAccounts + progress.OnlineRoundParamsWriteDuration += durOnlineRoundParams ledgerProcessstagingbalancesMicros.AddMicrosecondsSince(start, nil) progress.ProcessedBytes += uint64(len(bytes)) progress.ProcessedKVs += uint64(len(chunkKVs)) + progress.ProcessedOnlineAccounts += uint64(len(chunkOnlineAccounts)) + progress.ProcessedOnlineRoundParams += uint64(len(chunkOnlineRoundParams)) for _, acctBal := range normalizedAccountBalances { progress.TotalAccountHashes += uint64(len(acctBal.AccountHashes)) if !acctBal.PartialBalance { @@ -721,7 +786,7 @@ func countHashes(hashes [][]byte) (accountCount, kvCount uint64) { accountCount++ } } - return accountCount, kvCount + return } // BuildMerkleTrie would process the catchpointpendinghashes and insert all the items in it into the merkle trie @@ -931,7 +996,7 @@ func (c *catchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context return basics.Round(iRound), nil } -func (c *catchpointCatchupAccessorImpl) GetVerifyData(ctx context.Context) (balancesHash crypto.Digest, spverHash crypto.Digest, totals ledgercore.AccountTotals, err error) { +func (c *catchpointCatchupAccessorImpl) GetVerifyData(ctx context.Context) (balancesHash, spverHash, onlineAccountsHash, onlineRoundParamsHash crypto.Digest, totals ledgercore.AccountTotals, err error) { var rawStateProofVerificationContext []ledgercore.StateProofVerificationContext err = c.ledger.trackerDB().Transaction(func(ctx context.Context, tx trackerdb.TransactionScope) (err error) { @@ -966,16 +1031,61 @@ func (c *catchpointCatchupAccessorImpl) GetVerifyData(ctx context.Context) (bala return fmt.Errorf("unable to get state proof verification data: %v", err) } + onlineAccountsHash, _, err = calculateVerificationHash(ctx, tx.MakeOnlineAccountsIter) + if err != nil { + return fmt.Errorf("unable to get online accounts verification data: %v", err) + } + + onlineRoundParamsHash, _, err = calculateVerificationHash(ctx, tx.MakeOnlineRoundParamsIter) + if err != nil { + return fmt.Errorf("unable to get online round params verification data: %v", err) + } + return }) if err != nil { - return crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, err + return crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, crypto.Digest{}, ledgercore.AccountTotals{}, 
err } wrappedContext := catchpointStateProofVerificationContext{Data: rawStateProofVerificationContext} spverHash = crypto.HashObj(wrappedContext) - return balancesHash, spverHash, totals, err + return balancesHash, spverHash, onlineAccountsHash, onlineRoundParamsHash, totals, nil +} + +// calculateVerificationHash iterates over a TableIterator, hashes each item, and returns a hash of +// all the concatenated item hashes. It is used to verify onlineaccounts and onlineroundparams tables, +// both at restore time (in catchpointCatchupAccessorImpl) and snapshot time (in catchpointTracker). +func calculateVerificationHash[T crypto.Hashable]( + ctx context.Context, + iterFactory func(context.Context) (trackerdb.TableIterator[T], error), +) (crypto.Digest, uint64, error) { + + rows, err := iterFactory(ctx) + if err != nil { + return crypto.Digest{}, 0, err + } + defer rows.Close() + hasher := crypto.HashFactory{HashType: crypto.Sha512_256}.NewHash() + cnt := uint64(0) + for rows.Next() { + item, err := rows.GetItem() + if err != nil { + return crypto.Digest{}, 0, err + } + + h := crypto.HashObj(item) + _, err = hasher.Write(h[:]) + if err != nil { + return crypto.Digest{}, 0, err + } + cnt++ + } + ret := hasher.Sum(nil) + if len(ret) != crypto.DigestSize { + return crypto.Digest{}, 0, fmt.Errorf("unexpected hash size: %d", len(ret)) + } + return crypto.Digest(ret), cnt, nil } // VerifyCatchpoint verifies that the catchpoint is valid by reconstructing the label. @@ -1003,7 +1113,7 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl start := time.Now() ledgerVerifycatchpointCount.Inc(nil) - balancesHash, spVerificationHash, totals, err := c.GetVerifyData(ctx) + balancesHash, spVerificationHash, onlineAccountsHash, onlineRoundParamsHash, totals, err := c.GetVerifyData(ctx) ledgerVerifycatchpointMicros.AddMicrosecondsSince(start, nil) if err != nil { return err @@ -1016,8 +1126,12 @@ func (c *catchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl blockDigest := blk.Digest() if version <= CatchpointFileVersionV6 { catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerV6(blockRound, &blockDigest, &balancesHash, totals) + } else if version == CatchpointFileVersionV7 { + catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerV7(blockRound, &blockDigest, &balancesHash, totals, &spVerificationHash) + } else if version == CatchpointFileVersionV8 { + catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(blockRound, &blockDigest, &balancesHash, totals, &spVerificationHash, &onlineAccountsHash, &onlineRoundParamsHash) } else { - catchpointLabelMaker = ledgercore.MakeCatchpointLabelMakerCurrent(blockRound, &blockDigest, &balancesHash, totals, &spVerificationHash) + return fmt.Errorf("unable to verify catchpoint - version %d not supported", version) } generatedLabel := ledgercore.MakeLabel(catchpointLabelMaker) @@ -1155,7 +1269,7 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err return err } - var balancesRound, hashRound uint64 + var balancesRound, hashRound, catchpointFileVersion uint64 var totals ledgercore.AccountTotals balancesRound, err = crw.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupBalancesRound) @@ -1168,6 +1282,11 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err return err } + catchpointFileVersion, err = c.catchpointStore.ReadCatchpointStateUint64(ctx, trackerdb.CatchpointStateCatchupVersion) + if err != nil { + return fmt.Errorf("unable to 
retrieve catchpoint version: %v", err) + } + totals, err = ar.AccountsTotals(ctx, true) if err != nil { return err @@ -1190,20 +1309,20 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err if err != nil { return err } - { - tp := trackerdb.Params{ - InitAccounts: c.ledger.GenesisAccounts(), - InitProto: c.ledger.GenesisProtoVersion(), - GenesisHash: c.ledger.GenesisHash(), - FromCatchpoint: true, - CatchpointEnabled: c.ledger.catchpoint.catchpointEnabled(), - DbPathPrefix: c.ledger.catchpoint.dbDirectory, - BlockDb: c.ledger.blockDBs, - } - _, err = tx.RunMigrations(ctx, tp, c.ledger.log, 6 /*target database version*/) - if err != nil { - return err - } + + tp := trackerdb.Params{ + InitAccounts: c.ledger.GenesisAccounts(), + InitProto: c.ledger.GenesisProtoVersion(), + GenesisHash: c.ledger.GenesisHash(), + FromCatchpoint: true, + CatchpointEnabled: c.ledger.catchpoint.catchpointEnabled(), + DbPathPrefix: c.ledger.catchpoint.dbDirectory, + BlockDb: c.ledger.blockDBs, + } + // Upgrade to v6 + _, err = tx.RunMigrations(ctx, tp, c.ledger.log, 6 /*target database version*/) + if err != nil { + return err } err = crw.ApplyCatchpointStagingBalances(ctx, basics.Round(balancesRound), basics.Round(hashRound)) @@ -1211,6 +1330,20 @@ func (c *catchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err return err } + if catchpointFileVersion == CatchpointFileVersionV8 { // This catchpoint contains onlineaccounts and onlineroundparamstail tables. + // Upgrade to v7 (which adds the onlineaccounts & onlineroundparamstail tables, among others) + _, err = tx.RunMigrations(ctx, tp, c.ledger.log, 7) + if err != nil { + return err + } + + // Now that we have upgraded to v7, replace the onlineaccounts and onlineroundparamstail with the staged catchpoint tables. 
+ err = crw.ApplyCatchpointStagingTablesV7(ctx) + if err != nil { + return err + } + } + err = aw.AccountsPutTotals(totals, false) if err != nil { return err diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go index 37f27c6794..50ae986faa 100644 --- a/ledger/catchupaccessor_test.go +++ b/ledger/catchupaccessor_test.go @@ -541,6 +541,14 @@ func (w *testStagingWriter) writeKVs(ctx context.Context, kvrs []encoded.KVRecor return nil } +func (w *testStagingWriter) writeOnlineAccounts(ctx context.Context, accounts []encoded.OnlineAccountRecordV6) error { + return nil +} + +func (w *testStagingWriter) writeOnlineRoundParams(ctx context.Context, params []encoded.OnlineRoundParamsRecordV6) error { + return nil +} + func (w *testStagingWriter) writeHashes(ctx context.Context, balances []trackerdb.NormalizedAccountBalance) error { for _, bal := range balances { for _, hash := range bal.AccountHashes { diff --git a/ledger/encoded/msgp_gen.go b/ledger/encoded/msgp_gen.go index cc2422ded0..58de0bc8a1 100644 --- a/ledger/encoded/msgp_gen.go +++ b/ledger/encoded/msgp_gen.go @@ -41,6 +41,26 @@ import ( // |-----> (*) MsgIsZero // |-----> KVRecordV6MaxSize() // +// OnlineAccountRecordV6 +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) UnmarshalMsgWithState +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// |-----> OnlineAccountRecordV6MaxSize() +// +// OnlineRoundParamsRecordV6 +// |-----> (*) MarshalMsg +// |-----> (*) CanMarshalMsg +// |-----> (*) UnmarshalMsg +// |-----> (*) UnmarshalMsgWithState +// |-----> (*) CanUnmarshalMsg +// |-----> (*) Msgsize +// |-----> (*) MsgIsZero +// |-----> OnlineRoundParamsRecordV6MaxSize() +// // MarshalMsg implements msgp.Marshaler func (z *BalanceRecordV5) MarshalMsg(b []byte) (o []byte) { @@ -645,3 +665,360 @@ func KVRecordV6MaxSize() (s int) { s = 1 + 2 + msgp.BytesPrefixSize + KVRecordV6MaxKeyLength + 2 + msgp.BytesPrefixSize + KVRecordV6MaxValueLength return } + +// MarshalMsg implements msgp.Marshaler +func (z *OnlineAccountRecordV6) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 6 bits */ + if (*z).Address.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).Data.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + if (*z).NormalizedOnlineBalance == 0 { + zb0001Len-- + zb0001Mask |= 0x8 + } + if (*z).UpdateRound.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x10 + } + if (*z).VoteLastValid.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x20 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "addr" + o = append(o, 0xa4, 0x61, 0x64, 0x64, 0x72) + o = (*z).Address.MarshalMsg(o) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "data" + o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x61) + o = (*z).Data.MarshalMsg(o) + } + if (zb0001Mask & 0x8) == 0 { // if not empty + // string "nob" + o = append(o, 0xa3, 0x6e, 0x6f, 0x62) + o = msgp.AppendUint64(o, (*z).NormalizedOnlineBalance) + } + if (zb0001Mask & 0x10) == 0 { // if not empty + // string "upd" + o = append(o, 0xa3, 0x75, 0x70, 0x64) + o = (*z).UpdateRound.MarshalMsg(o) + } + if (zb0001Mask & 0x20) == 0 { // if not empty + // string "vlv" + o = append(o, 0xa3, 0x76, 0x6c, 0x76) + o = (*z).VoteLastValid.MarshalMsg(o) + } + } + return +} + +func (_ *OnlineAccountRecordV6) 
CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*OnlineAccountRecordV6) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *OnlineAccountRecordV6) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []byte, err error) { + if st.AllowableDepth == 0 { + err = msgp.ErrMaxDepthExceeded{} + return + } + st.AllowableDepth-- + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Address.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Address") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).UpdateRound.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "UpdateRound") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).NormalizedOnlineBalance, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "NormalizedOnlineBalance") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).VoteLastValid.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "VoteLastValid") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Data.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Data") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = OnlineAccountRecordV6{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "addr": + bts, err = (*z).Address.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Address") + return + } + case "upd": + bts, err = (*z).UpdateRound.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "UpdateRound") + return + } + case "nob": + (*z).NormalizedOnlineBalance, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "NormalizedOnlineBalance") + return + } + case "vlv": + bts, err = (*z).VoteLastValid.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "VoteLastValid") + return + } + case "data": + bts, err = (*z).Data.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Data") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (z *OnlineAccountRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) { + return z.UnmarshalMsgWithState(bts, msgp.DefaultUnmarshalState) +} +func (_ *OnlineAccountRecordV6) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*OnlineAccountRecordV6) + return ok +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *OnlineAccountRecordV6) Msgsize() (s int) { + s = 1 + 5 + (*z).Address.Msgsize() + 4 + (*z).UpdateRound.Msgsize() + 4 + msgp.Uint64Size + 4 + (*z).VoteLastValid.Msgsize() + 5 + (*z).Data.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value 
+func (z *OnlineAccountRecordV6) MsgIsZero() bool { + return ((*z).Address.MsgIsZero()) && ((*z).UpdateRound.MsgIsZero()) && ((*z).NormalizedOnlineBalance == 0) && ((*z).VoteLastValid.MsgIsZero()) && ((*z).Data.MsgIsZero()) +} + +// MaxSize returns a maximum valid message size for this message type +func OnlineAccountRecordV6MaxSize() (s int) { + s = 1 + 5 + basics.AddressMaxSize() + 4 + basics.RoundMaxSize() + 4 + msgp.Uint64Size + 4 + basics.RoundMaxSize() + 5 + panic("Unable to determine max size: MaxSize() not implemented for Raw type") + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *OnlineRoundParamsRecordV6) MarshalMsg(b []byte) (o []byte) { + o = msgp.Require(b, z.Msgsize()) + // omitempty: check for empty values + zb0001Len := uint32(2) + var zb0001Mask uint8 /* 3 bits */ + if (*z).Data.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x2 + } + if (*z).Round.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x4 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + if zb0001Len != 0 { + if (zb0001Mask & 0x2) == 0 { // if not empty + // string "data" + o = append(o, 0xa4, 0x64, 0x61, 0x74, 0x61) + o = (*z).Data.MarshalMsg(o) + } + if (zb0001Mask & 0x4) == 0 { // if not empty + // string "rnd" + o = append(o, 0xa3, 0x72, 0x6e, 0x64) + o = (*z).Round.MarshalMsg(o) + } + } + return +} + +func (_ *OnlineRoundParamsRecordV6) CanMarshalMsg(z interface{}) bool { + _, ok := (z).(*OnlineRoundParamsRecordV6) + return ok +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *OnlineRoundParamsRecordV6) UnmarshalMsgWithState(bts []byte, st msgp.UnmarshalState) (o []byte, err error) { + if st.AllowableDepth == 0 { + err = msgp.ErrMaxDepthExceeded{} + return + } + st.AllowableDepth-- + var field []byte + _ = field + var zb0001 int + var zb0002 bool + zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if _, ok := err.(msgp.TypeError); ok { + zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Round.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Round") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).Data.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "Data") + return + } + } + if zb0001 > 0 { + err = msgp.ErrTooManyArrayFields(zb0001) + if err != nil { + err = msgp.WrapError(err, "struct-from-array") + return + } + } + } else { + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0002 { + (*z) = OnlineRoundParamsRecordV6{} + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch string(field) { + case "rnd": + bts, err = (*z).Round.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Round") + return + } + case "data": + bts, err = (*z).Data.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "Data") + return + } + default: + err = msgp.ErrNoField(string(field)) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + } + o = bts + return +} + +func (z *OnlineRoundParamsRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error) { + return z.UnmarshalMsgWithState(bts, msgp.DefaultUnmarshalState) +} +func (_ *OnlineRoundParamsRecordV6) CanUnmarshalMsg(z interface{}) bool { + _, ok := (z).(*OnlineRoundParamsRecordV6) + return ok +} + +// Msgsize returns an upper bound 
estimate of the number of bytes occupied by the serialized message +func (z *OnlineRoundParamsRecordV6) Msgsize() (s int) { + s = 1 + 4 + (*z).Round.Msgsize() + 5 + (*z).Data.Msgsize() + return +} + +// MsgIsZero returns whether this is a zero value +func (z *OnlineRoundParamsRecordV6) MsgIsZero() bool { + return ((*z).Round.MsgIsZero()) && ((*z).Data.MsgIsZero()) +} + +// MaxSize returns a maximum valid message size for this message type +func OnlineRoundParamsRecordV6MaxSize() (s int) { + s = 1 + 4 + basics.RoundMaxSize() + 5 + panic("Unable to determine max size: MaxSize() not implemented for Raw type") + return +} diff --git a/ledger/encoded/msgp_gen_test.go b/ledger/encoded/msgp_gen_test.go index 415339c728..b905d9616e 100644 --- a/ledger/encoded/msgp_gen_test.go +++ b/ledger/encoded/msgp_gen_test.go @@ -193,3 +193,123 @@ func BenchmarkUnmarshalKVRecordV6(b *testing.B) { } } } + +func TestMarshalUnmarshalOnlineAccountRecordV6(t *testing.T) { + partitiontest.PartitionTest(t) + v := OnlineAccountRecordV6{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingOnlineAccountRecordV6(t *testing.T) { + protocol.RunEncodingTest(t, &OnlineAccountRecordV6{}) +} + +func BenchmarkMarshalMsgOnlineAccountRecordV6(b *testing.B) { + v := OnlineAccountRecordV6{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgOnlineAccountRecordV6(b *testing.B) { + v := OnlineAccountRecordV6{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalOnlineAccountRecordV6(b *testing.B) { + v := OnlineAccountRecordV6{} + bts := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalOnlineRoundParamsRecordV6(t *testing.T) { + partitiontest.PartitionTest(t) + v := OnlineRoundParamsRecordV6{} + bts := v.MarshalMsg(nil) + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func TestRandomizedEncodingOnlineRoundParamsRecordV6(t *testing.T) { + protocol.RunEncodingTest(t, &OnlineRoundParamsRecordV6{}) +} + +func BenchmarkMarshalMsgOnlineRoundParamsRecordV6(b *testing.B) { + v := OnlineRoundParamsRecordV6{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgOnlineRoundParamsRecordV6(b *testing.B) { + v := OnlineRoundParamsRecordV6{} + bts := make([]byte, 0, v.Msgsize()) + bts = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalOnlineRoundParamsRecordV6(b *testing.B) { + v := OnlineRoundParamsRecordV6{} + bts := 
v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/ledger/encoded/recordsV6.go b/ledger/encoded/recordsV6.go index 520f6f2b8e..2ed4161bf9 100644 --- a/ledger/encoded/recordsV6.go +++ b/ledger/encoded/recordsV6.go @@ -18,6 +18,7 @@ package encoded import ( "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/protocol" "github.com/algorand/msgp/msgp" ) @@ -62,3 +63,32 @@ type KVRecordV6 struct { Key []byte `codec:"k,allocbound=KVRecordV6MaxKeyLength"` Value []byte `codec:"v,allocbound=KVRecordV6MaxValueLength"` } + +// OnlineAccountRecordV6 is an encoded row from the onlineaccounts table, used for catchpoint files. +type OnlineAccountRecordV6 struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + Address basics.Address `codec:"addr,allocbound=crypto.DigestSize"` + UpdateRound basics.Round `codec:"upd"` + NormalizedOnlineBalance uint64 `codec:"nob"` + VoteLastValid basics.Round `codec:"vlv"` + Data msgp.Raw `codec:"data"` // encoding of BaseOnlineAccountData +} + +// ToBeHashed implements crypto.Hashable. +func (r OnlineAccountRecordV6) ToBeHashed() (protocol.HashID, []byte) { + return protocol.OnlineAccount, protocol.Encode(&r) +} + +// OnlineRoundParamsRecordV6 is an encoded row from the onlineroundparams table, used for catchpoint files. +type OnlineRoundParamsRecordV6 struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + + Round basics.Round `codec:"rnd"` + Data msgp.Raw `codec:"data"` // encoding of OnlineRoundParamsData +} + +// ToBeHashed implements crypto.Hashable. +func (r OnlineRoundParamsRecordV6) ToBeHashed() (protocol.HashID, []byte) { + return protocol.OnlineRoundParams, protocol.Encode(&r) +} diff --git a/ledger/ledgercore/catchpointlabel.go b/ledger/ledgercore/catchpointlabel.go index b80a0bc1e2..5a7bf3b0c2 100644 --- a/ledger/ledgercore/catchpointlabel.go +++ b/ledger/ledgercore/catchpointlabel.go @@ -82,32 +82,64 @@ func (l *CatchpointLabelMakerV6) message() string { return fmt.Sprintf("round=%d, block digest=%s, accounts digest=%s", l.ledgerRound, l.ledgerRoundBlockHash, l.balancesMerkleRoot) } -// CatchpointLabelMakerCurrent represent a single catchpoint maker, matching catchpoints of version V7 and above. +// CatchpointLabelMakerCurrent represents a single catchpoint maker, matching catchpoints of version V7 and above. type CatchpointLabelMakerCurrent struct { - v6Label CatchpointLabelMakerV6 - spVerificationHash crypto.Digest + v7Label CatchpointLabelMakerV7 + onlineAccountsHash crypto.Digest + onlineRoundParamsHash crypto.Digest } // MakeCatchpointLabelMakerCurrent creates a catchpoint label given the catchpoint label parameters. 
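// Illustrative only (not part of this change; variable names are placeholders): a v8
// label can be produced by passing the two new digests through the constructor below
// and feeding the maker to MakeLabel, e.g.
//
//	maker := MakeCatchpointLabelMakerCurrent(rnd, &blockHash, &balancesRoot, totals,
//		&spVerificationHash, &onlineAccountsHash, &onlineRoundParamsHash)
//	label := MakeLabel(maker)
//
// The maker wraps a CatchpointLabelMakerV7 and appends the onlineaccounts and
// onlineroundparams digests to the hashed buffer, so the v8 label is a strict
// extension of the v7 label.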
func MakeCatchpointLabelMakerCurrent(ledgerRound basics.Round, ledgerRoundBlockHash *crypto.Digest, - balancesMerkleRoot *crypto.Digest, totals AccountTotals, spVerificationContextHash *crypto.Digest) *CatchpointLabelMakerCurrent { + balancesMerkleRoot *crypto.Digest, totals AccountTotals, spVerificationContextHash, onlineAccountsHash, onlineRoundParamsHash *crypto.Digest) *CatchpointLabelMakerCurrent { return &CatchpointLabelMakerCurrent{ + v7Label: *MakeCatchpointLabelMakerV7(ledgerRound, ledgerRoundBlockHash, balancesMerkleRoot, totals, spVerificationContextHash), + onlineAccountsHash: *onlineAccountsHash, + onlineRoundParamsHash: *onlineRoundParamsHash, + } +} + +func (l *CatchpointLabelMakerCurrent) buffer() []byte { + v6Buffer := l.v7Label.buffer() + v6Buffer = append(v6Buffer, l.onlineAccountsHash[:]...) + v6Buffer = append(v6Buffer, l.onlineRoundParamsHash[:]...) + return v6Buffer +} + +func (l *CatchpointLabelMakerCurrent) round() basics.Round { + return l.v7Label.round() +} + +func (l *CatchpointLabelMakerCurrent) message() string { + return fmt.Sprintf("%s onlineaccts digest=%s onlineroundparams digest=%s", l.v7Label.message(), l.onlineAccountsHash, l.onlineRoundParamsHash) +} + +// CatchpointLabelMakerV7 represents a single catchpoint maker, matching catchpoints of version V7 and above. +type CatchpointLabelMakerV7 struct { + v6Label CatchpointLabelMakerV6 + spVerificationHash crypto.Digest +} + +// MakeCatchpointLabelMakerV7 creates a catchpoint label given the catchpoint label parameters. +func MakeCatchpointLabelMakerV7(ledgerRound basics.Round, ledgerRoundBlockHash *crypto.Digest, + balancesMerkleRoot *crypto.Digest, totals AccountTotals, spVerificationContextHash *crypto.Digest) *CatchpointLabelMakerV7 { + return &CatchpointLabelMakerV7{ v6Label: *MakeCatchpointLabelMakerV6(ledgerRound, ledgerRoundBlockHash, balancesMerkleRoot, totals), spVerificationHash: *spVerificationContextHash, } } -func (l *CatchpointLabelMakerCurrent) buffer() []byte { +func (l *CatchpointLabelMakerV7) buffer() []byte { v6Buffer := l.v6Label.buffer() return append(v6Buffer, l.spVerificationHash[:]...) 
} -func (l *CatchpointLabelMakerCurrent) round() basics.Round { +func (l *CatchpointLabelMakerV7) round() basics.Round { return l.v6Label.round() } -func (l *CatchpointLabelMakerCurrent) message() string { +func (l *CatchpointLabelMakerV7) message() string { return fmt.Sprintf("%s spver digest=%s", l.v6Label.message(), l.spVerificationHash) } diff --git a/ledger/ledgercore/catchpointlabel_test.go b/ledger/ledgercore/catchpointlabel_test.go index d5c8a9b0e7..f76e559d30 100644 --- a/ledger/ledgercore/catchpointlabel_test.go +++ b/ledger/ledgercore/catchpointlabel_test.go @@ -51,7 +51,7 @@ func TestUniqueCatchpointLabel(t *testing.T) { for _, balancesMerkleRoot := range balancesMerkleRoots { for _, stateProofVerificationContextHash := range stateProofVerificationContextHashes { for _, total := range totals { - labelMaker := MakeCatchpointLabelMakerCurrent(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash) + labelMaker := MakeCatchpointLabelMakerV7(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash) labelString := MakeLabel(labelMaker) require.False(t, uniqueSet[labelString]) uniqueSet[labelString] = true @@ -85,7 +85,7 @@ func TestCatchpointLabelParsing(t *testing.T) { for _, balancesMerkleRoot := range balancesMerkleRoots { for _, stateProofVerificationContextHash := range stateProofVerificationContextHashes { for _, total := range totals { - labelMaker := MakeCatchpointLabelMakerCurrent(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash) + labelMaker := MakeCatchpointLabelMakerV7(r, &ledgerRoundHash, &balancesMerkleRoot, total, &stateProofVerificationContextHash) labelString := MakeLabel(labelMaker) parsedRound, parsedHash, err := ParseCatchpointLabel(labelString) require.Equal(t, r, parsedRound) diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go index 603c83fdcc..7aee9fd9e6 100644 --- a/ledger/msgp_gen.go +++ b/ledger/msgp_gen.go @@ -127,8 +127,8 @@ func CatchpointCatchupStateMaxSize() (s int) { func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0001Len := uint32(9) - var zb0001Mask uint16 /* 10 bits */ + zb0001Len := uint32(11) + var zb0001Mask uint16 /* 12 bits */ if (*z).Totals.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x2 @@ -161,10 +161,18 @@ func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) { zb0001Len-- zb0001Mask |= 0x100 } - if (*z).Version == 0 { + if (*z).TotalOnlineAccounts == 0 { zb0001Len-- zb0001Mask |= 0x200 } + if (*z).TotalOnlineRoundParams == 0 { + zb0001Len-- + zb0001Mask |= 0x400 + } + if (*z).Version == 0 { + zb0001Len-- + zb0001Mask |= 0x800 + } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) if zb0001Len != 0 { @@ -209,6 +217,16 @@ func (z *CatchpointFileHeader) MarshalMsg(b []byte) (o []byte) { o = msgp.AppendUint64(o, (*z).TotalKVs) } if (zb0001Mask & 0x200) == 0 { // if not empty + // string "onlineAccountsCount" + o = append(o, 0xb3, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74) + o = msgp.AppendUint64(o, (*z).TotalOnlineAccounts) + } + if (zb0001Mask & 0x400) == 0 { // if not empty + // string "onlineRoundParamsCount" + o = append(o, 0xb6, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74) + o = msgp.AppendUint64(o, (*z).TotalOnlineRoundParams) + } + if (zb0001Mask & 0x800) == 0 { // if not 
empty // string "version" o = append(o, 0xa7, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) o = msgp.AppendUint64(o, (*z).Version) @@ -296,6 +314,22 @@ func (z *CatchpointFileHeader) UnmarshalMsgWithState(bts []byte, st msgp.Unmarsh return } } + if zb0001 > 0 { + zb0001-- + (*z).TotalOnlineAccounts, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "TotalOnlineAccounts") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).TotalOnlineRoundParams, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "TotalOnlineRoundParams") + return + } + } if zb0001 > 0 { zb0001-- (*z).Catchpoint, bts, err = msgp.ReadStringBytes(bts) @@ -377,6 +411,18 @@ func (z *CatchpointFileHeader) UnmarshalMsgWithState(bts []byte, st msgp.Unmarsh err = msgp.WrapError(err, "TotalKVs") return } + case "onlineAccountsCount": + (*z).TotalOnlineAccounts, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TotalOnlineAccounts") + return + } + case "onlineRoundParamsCount": + (*z).TotalOnlineRoundParams, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TotalOnlineRoundParams") + return + } case "catchpoint": (*z).Catchpoint, bts, err = msgp.ReadStringBytes(bts) if err != nil { @@ -412,18 +458,18 @@ func (_ *CatchpointFileHeader) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CatchpointFileHeader) Msgsize() (s int) { - s = 1 + 8 + msgp.Uint64Size + 14 + (*z).BalancesRound.Msgsize() + 12 + (*z).BlocksRound.Msgsize() + 14 + (*z).Totals.Msgsize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 11 + msgp.StringPrefixSize + len((*z).Catchpoint) + 18 + (*z).BlockHeaderDigest.Msgsize() + s = 1 + 8 + msgp.Uint64Size + 14 + (*z).BalancesRound.Msgsize() + 12 + (*z).BlocksRound.Msgsize() + 14 + (*z).Totals.Msgsize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 20 + msgp.Uint64Size + 23 + msgp.Uint64Size + 11 + msgp.StringPrefixSize + len((*z).Catchpoint) + 18 + (*z).BlockHeaderDigest.Msgsize() return } // MsgIsZero returns whether this is a zero value func (z *CatchpointFileHeader) MsgIsZero() bool { - return ((*z).Version == 0) && ((*z).BalancesRound.MsgIsZero()) && ((*z).BlocksRound.MsgIsZero()) && ((*z).Totals.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalChunks == 0) && ((*z).TotalKVs == 0) && ((*z).Catchpoint == "") && ((*z).BlockHeaderDigest.MsgIsZero()) + return ((*z).Version == 0) && ((*z).BalancesRound.MsgIsZero()) && ((*z).BlocksRound.MsgIsZero()) && ((*z).Totals.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalChunks == 0) && ((*z).TotalKVs == 0) && ((*z).TotalOnlineAccounts == 0) && ((*z).TotalOnlineRoundParams == 0) && ((*z).Catchpoint == "") && ((*z).BlockHeaderDigest.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func CatchpointFileHeaderMaxSize() (s int) { - s = 1 + 8 + msgp.Uint64Size + 14 + basics.RoundMaxSize() + 12 + basics.RoundMaxSize() + 14 + ledgercore.AccountTotalsMaxSize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 11 + s = 1 + 8 + msgp.Uint64Size + 14 + basics.RoundMaxSize() + 12 + basics.RoundMaxSize() + 14 + ledgercore.AccountTotalsMaxSize() + 14 + msgp.Uint64Size + 12 + msgp.Uint64Size + 9 + msgp.Uint64Size + 20 + msgp.Uint64Size + 23 + msgp.Uint64Size + 11 panic("Unable to determine max size: String type 
z.Catchpoint is unbounded") s += 18 + crypto.DigestMaxSize() return @@ -607,20 +653,28 @@ func CatchpointFileBalancesChunkV5MaxSize() (s int) { func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0003Len := uint32(2) - var zb0003Mask uint8 /* 4 bits */ + zb0005Len := uint32(4) + var zb0005Mask uint8 /* 6 bits */ if len((*z).Balances) == 0 { - zb0003Len-- - zb0003Mask |= 0x2 + zb0005Len-- + zb0005Mask |= 0x2 } if len((*z).KVs) == 0 { - zb0003Len-- - zb0003Mask |= 0x4 + zb0005Len-- + zb0005Mask |= 0x4 + } + if len((*z).OnlineAccounts) == 0 { + zb0005Len-- + zb0005Mask |= 0x10 } - // variable map header, size zb0003Len - o = append(o, 0x80|uint8(zb0003Len)) - if zb0003Len != 0 { - if (zb0003Mask & 0x2) == 0 { // if not empty + if len((*z).OnlineRoundParams) == 0 { + zb0005Len-- + zb0005Mask |= 0x20 + } + // variable map header, size zb0005Len + o = append(o, 0x80|uint8(zb0005Len)) + if zb0005Len != 0 { + if (zb0005Mask & 0x2) == 0 { // if not empty // string "bl" o = append(o, 0xa2, 0x62, 0x6c) if (*z).Balances == nil { @@ -632,7 +686,7 @@ func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) { o = (*z).Balances[zb0001].MarshalMsg(o) } } - if (zb0003Mask & 0x4) == 0 { // if not empty + if (zb0005Mask & 0x4) == 0 { // if not empty // string "kv" o = append(o, 0xa2, 0x6b, 0x76) if (*z).KVs == nil { @@ -644,6 +698,30 @@ func (z *catchpointFileChunkV6) MarshalMsg(b []byte) (o []byte) { o = (*z).KVs[zb0002].MarshalMsg(o) } } + if (zb0005Mask & 0x10) == 0 { // if not empty + // string "oa" + o = append(o, 0xa2, 0x6f, 0x61) + if (*z).OnlineAccounts == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendArrayHeader(o, uint32(len((*z).OnlineAccounts))) + } + for zb0003 := range (*z).OnlineAccounts { + o = (*z).OnlineAccounts[zb0003].MarshalMsg(o) + } + } + if (zb0005Mask & 0x20) == 0 { // if not empty + // string "orp" + o = append(o, 0xa3, 0x6f, 0x72, 0x70) + if (*z).OnlineRoundParams == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendArrayHeader(o, uint32(len((*z).OnlineRoundParams))) + } + for zb0004 := range (*z).OnlineRoundParams { + o = (*z).OnlineRoundParams[zb0004].MarshalMsg(o) + } + } } return } @@ -662,35 +740,35 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars st.AllowableDepth-- var field []byte _ = field - var zb0003 int - var zb0004 bool - zb0003, zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0005 int + var zb0006 bool + zb0005, zb0006, bts, err = msgp.ReadMapHeaderBytes(bts) if _, ok := err.(msgp.TypeError); ok { - zb0003, zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err) return } - if zb0003 > 0 { - zb0003-- - var zb0005 int - var zb0006 bool - zb0005, zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) + if zb0005 > 0 { + zb0005-- + var zb0007 int + var zb0008 bool + zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "struct-from-array", "Balances") return } - if zb0005 > BalancesPerCatchpointFileChunk { - err = msgp.ErrOverflow(uint64(zb0005), uint64(BalancesPerCatchpointFileChunk)) + if zb0007 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0007), uint64(BalancesPerCatchpointFileChunk)) err = msgp.WrapError(err, "struct-from-array", "Balances") return } - if zb0006 { + if zb0008 { (*z).Balances = nil - } else if (*z).Balances != nil && cap((*z).Balances) >= 
zb0005 { - (*z).Balances = ((*z).Balances)[:zb0005] + } else if (*z).Balances != nil && cap((*z).Balances) >= zb0007 { + (*z).Balances = ((*z).Balances)[:zb0007] } else { - (*z).Balances = make([]encoded.BalanceRecordV6, zb0005) + (*z).Balances = make([]encoded.BalanceRecordV6, zb0007) } for zb0001 := range (*z).Balances { bts, err = (*z).Balances[zb0001].UnmarshalMsgWithState(bts, st) @@ -700,26 +778,26 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars } } } - if zb0003 > 0 { - zb0003-- - var zb0007 int - var zb0008 bool - zb0007, zb0008, bts, err = msgp.ReadArrayHeaderBytes(bts) + if zb0005 > 0 { + zb0005-- + var zb0009 int + var zb0010 bool + zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "struct-from-array", "KVs") return } - if zb0007 > BalancesPerCatchpointFileChunk { - err = msgp.ErrOverflow(uint64(zb0007), uint64(BalancesPerCatchpointFileChunk)) + if zb0009 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0009), uint64(BalancesPerCatchpointFileChunk)) err = msgp.WrapError(err, "struct-from-array", "KVs") return } - if zb0008 { + if zb0010 { (*z).KVs = nil - } else if (*z).KVs != nil && cap((*z).KVs) >= zb0007 { - (*z).KVs = ((*z).KVs)[:zb0007] + } else if (*z).KVs != nil && cap((*z).KVs) >= zb0009 { + (*z).KVs = ((*z).KVs)[:zb0009] } else { - (*z).KVs = make([]encoded.KVRecordV6, zb0007) + (*z).KVs = make([]encoded.KVRecordV6, zb0009) } for zb0002 := range (*z).KVs { bts, err = (*z).KVs[zb0002].UnmarshalMsgWithState(bts, st) @@ -729,8 +807,66 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars } } } - if zb0003 > 0 { - err = msgp.ErrTooManyArrayFields(zb0003) + if zb0005 > 0 { + zb0005-- + var zb0011 int + var zb0012 bool + zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineAccounts") + return + } + if zb0011 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0011), uint64(BalancesPerCatchpointFileChunk)) + err = msgp.WrapError(err, "struct-from-array", "OnlineAccounts") + return + } + if zb0012 { + (*z).OnlineAccounts = nil + } else if (*z).OnlineAccounts != nil && cap((*z).OnlineAccounts) >= zb0011 { + (*z).OnlineAccounts = ((*z).OnlineAccounts)[:zb0011] + } else { + (*z).OnlineAccounts = make([]encoded.OnlineAccountRecordV6, zb0011) + } + for zb0003 := range (*z).OnlineAccounts { + bts, err = (*z).OnlineAccounts[zb0003].UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineAccounts", zb0003) + return + } + } + } + if zb0005 > 0 { + zb0005-- + var zb0013 int + var zb0014 bool + zb0013, zb0014, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineRoundParams") + return + } + if zb0013 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0013), uint64(BalancesPerCatchpointFileChunk)) + err = msgp.WrapError(err, "struct-from-array", "OnlineRoundParams") + return + } + if zb0014 { + (*z).OnlineRoundParams = nil + } else if (*z).OnlineRoundParams != nil && cap((*z).OnlineRoundParams) >= zb0013 { + (*z).OnlineRoundParams = ((*z).OnlineRoundParams)[:zb0013] + } else { + (*z).OnlineRoundParams = make([]encoded.OnlineRoundParamsRecordV6, zb0013) + } + for zb0004 := range (*z).OnlineRoundParams { + bts, err = (*z).OnlineRoundParams[zb0004].UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, 
"struct-from-array", "OnlineRoundParams", zb0004) + return + } + } + } + if zb0005 > 0 { + err = msgp.ErrTooManyArrayFields(zb0005) if err != nil { err = msgp.WrapError(err, "struct-from-array") return @@ -741,11 +877,11 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars err = msgp.WrapError(err) return } - if zb0004 { + if zb0006 { (*z) = catchpointFileChunkV6{} } - for zb0003 > 0 { - zb0003-- + for zb0005 > 0 { + zb0005-- field, bts, err = msgp.ReadMapKeyZC(bts) if err != nil { err = msgp.WrapError(err) @@ -753,24 +889,24 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars } switch string(field) { case "bl": - var zb0009 int - var zb0010 bool - zb0009, zb0010, bts, err = msgp.ReadArrayHeaderBytes(bts) + var zb0015 int + var zb0016 bool + zb0015, zb0016, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Balances") return } - if zb0009 > BalancesPerCatchpointFileChunk { - err = msgp.ErrOverflow(uint64(zb0009), uint64(BalancesPerCatchpointFileChunk)) + if zb0015 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0015), uint64(BalancesPerCatchpointFileChunk)) err = msgp.WrapError(err, "Balances") return } - if zb0010 { + if zb0016 { (*z).Balances = nil - } else if (*z).Balances != nil && cap((*z).Balances) >= zb0009 { - (*z).Balances = ((*z).Balances)[:zb0009] + } else if (*z).Balances != nil && cap((*z).Balances) >= zb0015 { + (*z).Balances = ((*z).Balances)[:zb0015] } else { - (*z).Balances = make([]encoded.BalanceRecordV6, zb0009) + (*z).Balances = make([]encoded.BalanceRecordV6, zb0015) } for zb0001 := range (*z).Balances { bts, err = (*z).Balances[zb0001].UnmarshalMsgWithState(bts, st) @@ -780,24 +916,24 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars } } case "kv": - var zb0011 int - var zb0012 bool - zb0011, zb0012, bts, err = msgp.ReadArrayHeaderBytes(bts) + var zb0017 int + var zb0018 bool + zb0017, zb0018, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "KVs") return } - if zb0011 > BalancesPerCatchpointFileChunk { - err = msgp.ErrOverflow(uint64(zb0011), uint64(BalancesPerCatchpointFileChunk)) + if zb0017 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0017), uint64(BalancesPerCatchpointFileChunk)) err = msgp.WrapError(err, "KVs") return } - if zb0012 { + if zb0018 { (*z).KVs = nil - } else if (*z).KVs != nil && cap((*z).KVs) >= zb0011 { - (*z).KVs = ((*z).KVs)[:zb0011] + } else if (*z).KVs != nil && cap((*z).KVs) >= zb0017 { + (*z).KVs = ((*z).KVs)[:zb0017] } else { - (*z).KVs = make([]encoded.KVRecordV6, zb0011) + (*z).KVs = make([]encoded.KVRecordV6, zb0017) } for zb0002 := range (*z).KVs { bts, err = (*z).KVs[zb0002].UnmarshalMsgWithState(bts, st) @@ -806,6 +942,60 @@ func (z *catchpointFileChunkV6) UnmarshalMsgWithState(bts []byte, st msgp.Unmars return } } + case "oa": + var zb0019 int + var zb0020 bool + zb0019, zb0020, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "OnlineAccounts") + return + } + if zb0019 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0019), uint64(BalancesPerCatchpointFileChunk)) + err = msgp.WrapError(err, "OnlineAccounts") + return + } + if zb0020 { + (*z).OnlineAccounts = nil + } else if (*z).OnlineAccounts != nil && cap((*z).OnlineAccounts) >= zb0019 { + (*z).OnlineAccounts = ((*z).OnlineAccounts)[:zb0019] + } else { + (*z).OnlineAccounts = make([]encoded.OnlineAccountRecordV6, 
zb0019) + } + for zb0003 := range (*z).OnlineAccounts { + bts, err = (*z).OnlineAccounts[zb0003].UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "OnlineAccounts", zb0003) + return + } + } + case "orp": + var zb0021 int + var zb0022 bool + zb0021, zb0022, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "OnlineRoundParams") + return + } + if zb0021 > BalancesPerCatchpointFileChunk { + err = msgp.ErrOverflow(uint64(zb0021), uint64(BalancesPerCatchpointFileChunk)) + err = msgp.WrapError(err, "OnlineRoundParams") + return + } + if zb0022 { + (*z).OnlineRoundParams = nil + } else if (*z).OnlineRoundParams != nil && cap((*z).OnlineRoundParams) >= zb0021 { + (*z).OnlineRoundParams = ((*z).OnlineRoundParams)[:zb0021] + } else { + (*z).OnlineRoundParams = make([]encoded.OnlineRoundParamsRecordV6, zb0021) + } + for zb0004 := range (*z).OnlineRoundParams { + bts, err = (*z).OnlineRoundParams[zb0004].UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "OnlineRoundParams", zb0004) + return + } + } default: err = msgp.ErrNoField(string(field)) if err != nil { @@ -837,12 +1027,20 @@ func (z *catchpointFileChunkV6) Msgsize() (s int) { for zb0002 := range (*z).KVs { s += (*z).KVs[zb0002].Msgsize() } + s += 3 + msgp.ArrayHeaderSize + for zb0003 := range (*z).OnlineAccounts { + s += (*z).OnlineAccounts[zb0003].Msgsize() + } + s += 4 + msgp.ArrayHeaderSize + for zb0004 := range (*z).OnlineRoundParams { + s += (*z).OnlineRoundParams[zb0004].Msgsize() + } return } // MsgIsZero returns whether this is a zero value func (z *catchpointFileChunkV6) MsgIsZero() bool { - return (len((*z).Balances) == 0) && (len((*z).KVs) == 0) + return (len((*z).Balances) == 0) && (len((*z).KVs) == 0) && (len((*z).OnlineAccounts) == 0) && (len((*z).OnlineRoundParams) == 0) } // MaxSize returns a maximum valid message size for this message type @@ -853,6 +1051,12 @@ func CatchpointFileChunkV6MaxSize() (s int) { s += 3 // Calculating size of slice: z.KVs s += msgp.ArrayHeaderSize + ((BalancesPerCatchpointFileChunk) * (encoded.KVRecordV6MaxSize())) + s += 3 + // Calculating size of slice: z.OnlineAccounts + s += msgp.ArrayHeaderSize + ((BalancesPerCatchpointFileChunk) * (encoded.OnlineAccountRecordV6MaxSize())) + s += 4 + // Calculating size of slice: z.OnlineRoundParams + s += msgp.ArrayHeaderSize + ((BalancesPerCatchpointFileChunk) * (encoded.OnlineRoundParamsRecordV6MaxSize())) return } diff --git a/ledger/store/trackerdb/catchpoint.go b/ledger/store/trackerdb/catchpoint.go index ad6c9a236d..f2d48b0347 100644 --- a/ledger/store/trackerdb/catchpoint.go +++ b/ledger/store/trackerdb/catchpoint.go @@ -125,6 +125,9 @@ type CatchpointFirstStageInfo struct { // data files are generated. TotalKVs uint64 `codec:"kvsCount"` + TotalOnlineAccounts uint64 `codec:"onlineAccountsCount"` + TotalOnlineRoundParams uint64 `codec:"onlineRoundParamsCount"` + // Total number of chunks in the catchpoint data file. Only set when catchpoint // data files are generated. TotalChunks uint64 `codec:"chunksCount"` @@ -133,6 +136,10 @@ type CatchpointFirstStageInfo struct { // StateProofVerificationHash is the hash of the state proof verification data contained in the catchpoint data file. StateProofVerificationHash crypto.Digest `codec:"spVerificationHash"` + + // OnlineAccountsHash and OnlineRoundParamsHash provide verification for these tables in the catchpoint data file. 
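+	// These digests are recorded with the other first-stage results so that a restored
+	// catchpoint can later be checked against them; exactly how the tables are folded
+	// into each digest is defined by the catchpoint writer and is not shown here. As a
+	// hedged illustration, a single staged record can be hashed through its ToBeHashed
+	// implementation, e.g.
+	//
+	//	h := crypto.HashObj(record) // record is an encoded.OnlineAccountRecordV6 or encoded.OnlineRoundParamsRecordV6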
+ OnlineAccountsHash crypto.Digest `codec:"onlineAccountsHash"` + OnlineRoundParamsHash crypto.Digest `codec:"onlineRoundParamsHash"` } // MakeCatchpointFilePath builds the path of a catchpoint file. diff --git a/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go b/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go index 6d6b527f48..3113ac86dd 100644 --- a/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go +++ b/ledger/store/trackerdb/dualdriver/accounts_reader_ext.go @@ -360,3 +360,39 @@ func (ar *accountsReaderExt) TotalResources(ctx context.Context) (total uint64, // return primary results return totalP, nil } + +// TotalOnlineAccountRows implements trackerdb.AccountsReaderExt +func (ar *accountsReaderExt) TotalOnlineAccountRows(ctx context.Context) (total uint64, err error) { + totalP, errP := ar.primary.TotalOnlineAccountRows(ctx) + totalS, errS := ar.secondary.TotalOnlineAccountRows(ctx) + // coalesce errors + err = coalesceErrors(errP, errS) + if err != nil { + return + } + // check results match + if totalP != totalS { + err = ErrInconsistentResult + return + } + // return primary results + return totalP, nil +} + +// TotalOnlineRoundParams implements trackerdb.AccountsReaderExt +func (ar *accountsReaderExt) TotalOnlineRoundParams(ctx context.Context) (total uint64, err error) { + totalP, errP := ar.primary.TotalOnlineRoundParams(ctx) + totalS, errS := ar.secondary.TotalOnlineRoundParams(ctx) + // coalesce errors + err = coalesceErrors(errP, errS) + if err != nil { + return + } + // check results match + if totalP != totalS { + err = ErrInconsistentResult + return + } + // return primary results + return totalP, nil +} diff --git a/ledger/store/trackerdb/dualdriver/dualdriver.go b/ledger/store/trackerdb/dualdriver/dualdriver.go index e51b05929f..ed23528675 100644 --- a/ledger/store/trackerdb/dualdriver/dualdriver.go +++ b/ledger/store/trackerdb/dualdriver/dualdriver.go @@ -24,6 +24,7 @@ import ( "sync" "time" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/util/db" @@ -264,8 +265,8 @@ func (*reader) MakeCatchpointReader() (trackerdb.CatchpointReader, error) { return nil, nil } -// MakeEncodedAccoutsBatchIter implements trackerdb.Reader -func (*reader) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAccountsBatchIter { +// MakeEncodedAccountsBatchIter implements trackerdb.Reader +func (*reader) MakeEncodedAccountsBatchIter() trackerdb.EncodedAccountsBatchIter { // TODO: catchpoint return nil } @@ -276,6 +277,18 @@ func (*reader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) { return nil, nil } +// MakeOnlineAccountsIter implements trackerdb.Reader +func (*reader) MakeOnlineAccountsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + // TODO: catchpoint + return nil, nil +} + +// MakeOnlineRoundParamsIter implements trackerdb.Reader +func (*reader) MakeOnlineRoundParamsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + // TODO: catchpoint + return nil, nil +} + type writer struct { primary trackerdb.Writer secondary trackerdb.Writer diff --git a/ledger/store/trackerdb/generickv/accounts_ext_reader.go b/ledger/store/trackerdb/generickv/accounts_ext_reader.go index 79460b51e7..6e5c72daaf 100644 --- a/ledger/store/trackerdb/generickv/accounts_ext_reader.go +++ b/ledger/store/trackerdb/generickv/accounts_ext_reader.go @@ -125,6 +125,16 
@@ func (r *accountsReader) TotalKVs(ctx context.Context) (total uint64, err error) return } +func (r *accountsReader) TotalOnlineAccountRows(ctx context.Context) (total uint64, err error) { + // TODO: catchpoint + return +} + +func (r *accountsReader) TotalOnlineRoundParams(ctx context.Context) (total uint64, err error) { + // TODO: catchpoint + return +} + // TODO: this replicates some functionality from LookupOnlineHistory, implemented for onlineAccountsReader func (r *accountsReader) LookupOnlineAccountDataByAddress(addr basics.Address) (ref trackerdb.OnlineAccountRef, data []byte, err error) { low, high := onlineAccountAddressRangePrefix(addr) diff --git a/ledger/store/trackerdb/generickv/reader.go b/ledger/store/trackerdb/generickv/reader.go index 454a4eb9dc..f8422792e5 100644 --- a/ledger/store/trackerdb/generickv/reader.go +++ b/ledger/store/trackerdb/generickv/reader.go @@ -20,6 +20,7 @@ import ( "context" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/store/trackerdb" ) @@ -65,8 +66,8 @@ func (r *reader) MakeCatchpointReader() (trackerdb.CatchpointReader, error) { panic("unimplemented") } -// MakeEncodedAccoutsBatchIter implements trackerdb.Reader -func (r *reader) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAccountsBatchIter { +// MakeEncodedAccountsBatchIter implements trackerdb.Reader +func (r *reader) MakeEncodedAccountsBatchIter() trackerdb.EncodedAccountsBatchIter { // TODO: catchpoint panic("unimplemented") } @@ -76,3 +77,15 @@ func (r *reader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) { // TODO: catchpoint panic("unimplemented") } + +// MakeOnlineAccountsIter implements trackerdb.Reader +func (r *reader) MakeOnlineAccountsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + // TODO: catchpoint + panic("unimplemented") +} + +// MakeOnlineRoundParamsIter implements trackerdb.Reader +func (r *reader) MakeOnlineRoundParamsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + // TODO: catchpoint + panic("unimplemented") +} diff --git a/ledger/store/trackerdb/interface.go b/ledger/store/trackerdb/interface.go index 25d7b79e3c..946056eaf8 100644 --- a/ledger/store/trackerdb/interface.go +++ b/ledger/store/trackerdb/interface.go @@ -126,6 +126,8 @@ type AccountsReaderExt interface { TotalResources(ctx context.Context) (total uint64, err error) TotalAccounts(ctx context.Context) (total uint64, err error) TotalKVs(ctx context.Context) (total uint64, err error) + TotalOnlineAccountRows(ctx context.Context) (total uint64, err error) + TotalOnlineRoundParams(ctx context.Context) (total uint64, err error) AccountsRound() (rnd basics.Round, err error) LookupOnlineAccountDataByAddress(addr basics.Address) (ref OnlineAccountRef, data []byte, err error) AccountsOnlineTop(rnd basics.Round, offset uint64, n uint64, proto config.ConsensusParams) (map[basics.Address]*ledgercore.OnlineAccount, error) @@ -176,10 +178,13 @@ type CatchpointWriter interface { WriteCatchpointStagingBalances(ctx context.Context, bals []NormalizedAccountBalance) error WriteCatchpointStagingKVs(ctx context.Context, keys [][]byte, values [][]byte, hashes [][]byte) error + WriteCatchpointStagingOnlineAccounts(context.Context, []encoded.OnlineAccountRecordV6) error + WriteCatchpointStagingOnlineRoundParams(context.Context, []encoded.OnlineRoundParamsRecordV6) error WriteCatchpointStagingCreatable(ctx context.Context, bals 
[]NormalizedAccountBalance) error WriteCatchpointStagingHashes(ctx context.Context, bals []NormalizedAccountBalance) error ApplyCatchpointStagingBalances(ctx context.Context, balancesRound basics.Round, merkleRootRound basics.Round) (err error) + ApplyCatchpointStagingTablesV7(ctx context.Context) error ResetCatchpointStagingBalances(ctx context.Context, newCatchup bool) (err error) InsertUnfinishedCatchpoint(ctx context.Context, round basics.Round, blockHash crypto.Digest) error @@ -235,6 +240,13 @@ type KVsIter interface { Close() } +// TableIterator is used to add online accounts and online round params to catchpoint files. +type TableIterator[T any] interface { + Next() bool + GetItem() (T, error) + Close() +} + // EncodedAccountsBatchIter is an iterator for a accounts. type EncodedAccountsBatchIter interface { Next(ctx context.Context, accountCount int, resourceCount int) (bals []encoded.BalanceRecordV6, numAccountsProcessed uint64, err error) diff --git a/ledger/store/trackerdb/msgp_gen.go b/ledger/store/trackerdb/msgp_gen.go index 98f35bf519..3c3206e71b 100644 --- a/ledger/store/trackerdb/msgp_gen.go +++ b/ledger/store/trackerdb/msgp_gen.go @@ -1334,8 +1334,8 @@ func BaseVotingDataMaxSize() (s int) { func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) { o = msgp.Require(b, z.Msgsize()) // omitempty: check for empty values - zb0001Len := uint32(7) - var zb0001Mask uint8 /* 8 bits */ + zb0001Len := uint32(11) + var zb0001Mask uint16 /* 12 bits */ if (*z).Totals.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x2 @@ -1356,14 +1356,30 @@ func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) { zb0001Len-- zb0001Mask |= 0x20 } - if (*z).StateProofVerificationHash.MsgIsZero() { + if (*z).TotalOnlineAccounts == 0 { zb0001Len-- zb0001Mask |= 0x40 } - if (*z).TrieBalancesHash.MsgIsZero() { + if (*z).OnlineAccountsHash.MsgIsZero() { zb0001Len-- zb0001Mask |= 0x80 } + if (*z).TotalOnlineRoundParams == 0 { + zb0001Len-- + zb0001Mask |= 0x100 + } + if (*z).OnlineRoundParamsHash.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x200 + } + if (*z).StateProofVerificationHash.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x400 + } + if (*z).TrieBalancesHash.MsgIsZero() { + zb0001Len-- + zb0001Mask |= 0x800 + } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) if zb0001Len != 0 { @@ -1393,11 +1409,31 @@ func (z *CatchpointFirstStageInfo) MarshalMsg(b []byte) (o []byte) { o = msgp.AppendUint64(o, (*z).TotalKVs) } if (zb0001Mask & 0x40) == 0 { // if not empty + // string "onlineAccountsCount" + o = append(o, 0xb3, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74) + o = msgp.AppendUint64(o, (*z).TotalOnlineAccounts) + } + if (zb0001Mask & 0x80) == 0 { // if not empty + // string "onlineAccountsHash" + o = append(o, 0xb2, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x48, 0x61, 0x73, 0x68) + o = (*z).OnlineAccountsHash.MarshalMsg(o) + } + if (zb0001Mask & 0x100) == 0 { // if not empty + // string "onlineRoundParamsCount" + o = append(o, 0xb6, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74) + o = msgp.AppendUint64(o, (*z).TotalOnlineRoundParams) + } + if (zb0001Mask & 0x200) == 0 { // if not empty + // string "onlineRoundParamsHash" + o = append(o, 0xb5, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x61, 0x73, 0x68) + o = 
(*z).OnlineRoundParamsHash.MarshalMsg(o) + } + if (zb0001Mask & 0x400) == 0 { // if not empty // string "spVerificationHash" o = append(o, 0xb2, 0x73, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x61, 0x73, 0x68) o = (*z).StateProofVerificationHash.MarshalMsg(o) } - if (zb0001Mask & 0x80) == 0 { // if not empty + if (zb0001Mask & 0x800) == 0 { // if not empty // string "trieBalancesHash" o = append(o, 0xb0, 0x74, 0x72, 0x69, 0x65, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x48, 0x61, 0x73, 0x68) o = (*z).TrieBalancesHash.MarshalMsg(o) @@ -1461,6 +1497,22 @@ func (z *CatchpointFirstStageInfo) UnmarshalMsgWithState(bts []byte, st msgp.Unm return } } + if zb0001 > 0 { + zb0001-- + (*z).TotalOnlineAccounts, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "TotalOnlineAccounts") + return + } + } + if zb0001 > 0 { + zb0001-- + (*z).TotalOnlineRoundParams, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "TotalOnlineRoundParams") + return + } + } if zb0001 > 0 { zb0001-- (*z).TotalChunks, bts, err = msgp.ReadUint64Bytes(bts) @@ -1485,6 +1537,22 @@ func (z *CatchpointFirstStageInfo) UnmarshalMsgWithState(bts []byte, st msgp.Unm return } } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OnlineAccountsHash.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineAccountsHash") + return + } + } + if zb0001 > 0 { + zb0001-- + bts, err = (*z).OnlineRoundParamsHash.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "struct-from-array", "OnlineRoundParamsHash") + return + } + } if zb0001 > 0 { err = msgp.ErrTooManyArrayFields(zb0001) if err != nil { @@ -1532,6 +1600,18 @@ func (z *CatchpointFirstStageInfo) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "TotalKVs") return } + case "onlineAccountsCount": + (*z).TotalOnlineAccounts, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TotalOnlineAccounts") + return + } + case "onlineRoundParamsCount": + (*z).TotalOnlineRoundParams, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TotalOnlineRoundParams") + return + } case "chunksCount": (*z).TotalChunks, bts, err = msgp.ReadUint64Bytes(bts) if err != nil { @@ -1550,6 +1630,18 @@ func (z *CatchpointFirstStageInfo) UnmarshalMsgWithState(bts []byte, st msgp.Unm err = msgp.WrapError(err, "StateProofVerificationHash") return } + case "onlineAccountsHash": + bts, err = (*z).OnlineAccountsHash.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "OnlineAccountsHash") + return + } + case "onlineRoundParamsHash": + bts, err = (*z).OnlineRoundParamsHash.UnmarshalMsgWithState(bts, st) + if err != nil { + err = msgp.WrapError(err, "OnlineRoundParamsHash") + return + } default: err = msgp.ErrNoField(string(field)) if err != nil { @@ -1573,18 +1665,18 @@ func (_ *CatchpointFirstStageInfo) CanUnmarshalMsg(z interface{}) bool { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CatchpointFirstStageInfo) Msgsize() (s int) { - s = 1 + 14 + (*z).Totals.Msgsize() + 17 + (*z).TrieBalancesHash.Msgsize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + (*z).StateProofVerificationHash.Msgsize() + s = 1 + 14 + (*z).Totals.Msgsize() + 17 + (*z).TrieBalancesHash.Msgsize() + 14 + 
msgp.Uint64Size + 9 + msgp.Uint64Size + 20 + msgp.Uint64Size + 23 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + (*z).StateProofVerificationHash.Msgsize() + 19 + (*z).OnlineAccountsHash.Msgsize() + 22 + (*z).OnlineRoundParamsHash.Msgsize() return } // MsgIsZero returns whether this is a zero value func (z *CatchpointFirstStageInfo) MsgIsZero() bool { - return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalKVs == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0) && ((*z).StateProofVerificationHash.MsgIsZero()) + return ((*z).Totals.MsgIsZero()) && ((*z).TrieBalancesHash.MsgIsZero()) && ((*z).TotalAccounts == 0) && ((*z).TotalKVs == 0) && ((*z).TotalOnlineAccounts == 0) && ((*z).TotalOnlineRoundParams == 0) && ((*z).TotalChunks == 0) && ((*z).BiggestChunkLen == 0) && ((*z).StateProofVerificationHash.MsgIsZero()) && ((*z).OnlineAccountsHash.MsgIsZero()) && ((*z).OnlineRoundParamsHash.MsgIsZero()) } // MaxSize returns a maximum valid message size for this message type func CatchpointFirstStageInfoMaxSize() (s int) { - s = 1 + 14 + ledgercore.AccountTotalsMaxSize() + 17 + crypto.DigestMaxSize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + crypto.DigestMaxSize() + s = 1 + 14 + ledgercore.AccountTotalsMaxSize() + 17 + crypto.DigestMaxSize() + 14 + msgp.Uint64Size + 9 + msgp.Uint64Size + 20 + msgp.Uint64Size + 23 + msgp.Uint64Size + 12 + msgp.Uint64Size + 13 + msgp.Uint64Size + 19 + crypto.DigestMaxSize() + 19 + crypto.DigestMaxSize() + 22 + crypto.DigestMaxSize() return } diff --git a/ledger/store/trackerdb/sqlitedriver/accountsV2.go b/ledger/store/trackerdb/sqlitedriver/accountsV2.go index 0ba84c84bd..b5443f0cda 100644 --- a/ledger/store/trackerdb/sqlitedriver/accountsV2.go +++ b/ledger/store/trackerdb/sqlitedriver/accountsV2.go @@ -379,6 +379,28 @@ func (r *accountsV2Reader) TotalKVs(ctx context.Context) (total uint64, err erro return } +// TotalOnlineAccountRows returns the total number of rows in the onlineaccounts table. +func (r *accountsV2Reader) TotalOnlineAccountRows(ctx context.Context) (total uint64, err error) { + err = r.q.QueryRowContext(ctx, "SELECT count(1) FROM onlineaccounts").Scan(&total) + if err == sql.ErrNoRows { + total = 0 + err = nil + return + } + return +} + +// TotalOnlineRoundParams returns the total number of rows in the onlineroundparamstail table. 
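+// Illustrative only (not part of this change; the receiver and field wiring are assumed):
+// these row counts are the natural inputs for the new TotalOnlineAccounts and
+// TotalOnlineRoundParams fields of CatchpointFirstStageInfo when the first catchpoint
+// stage is recorded, e.g.
+//
+//	info.TotalOnlineAccounts, _ = reader.TotalOnlineAccountRows(ctx)
+//	info.TotalOnlineRoundParams, _ = reader.TotalOnlineRoundParams(ctx)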
+func (r *accountsV2Reader) TotalOnlineRoundParams(ctx context.Context) (total uint64, err error) { + err = r.q.QueryRowContext(ctx, "SELECT count(1) FROM onlineroundparamstail").Scan(&total) + if err == sql.ErrNoRows { + total = 0 + err = nil + return + } + return +} + // LoadTxTail returns the tx tails func (r *accountsV2Reader) LoadTxTail(ctx context.Context, dbRound basics.Round) (roundData []*trackerdb.TxTailRound, roundHash []crypto.Digest, baseRound basics.Round, err error) { rows, err := r.q.QueryContext(ctx, "SELECT rnd, data FROM txtail ORDER BY rnd DESC") diff --git a/ledger/store/trackerdb/sqlitedriver/catchpoint.go b/ledger/store/trackerdb/sqlitedriver/catchpoint.go index cdda10978e..94a0a15b7c 100644 --- a/ledger/store/trackerdb/sqlitedriver/catchpoint.go +++ b/ledger/store/trackerdb/sqlitedriver/catchpoint.go @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" @@ -464,6 +465,42 @@ func (cw *catchpointWriter) WriteCatchpointStagingKVs(ctx context.Context, keys return nil } + +// WriteCatchpointStagingOnlineAccounts inserts all the onlineaccounts in the provided array +// into the catchpoint staging table catchpointonlineaccounts. +func (cw *catchpointWriter) WriteCatchpointStagingOnlineAccounts(ctx context.Context, oas []encoded.OnlineAccountRecordV6) error { + insertStmt, err := cw.e.PrepareContext(ctx, "INSERT INTO catchpointonlineaccounts(address, updround, normalizedonlinebalance, votelastvalid, data) VALUES(?, ?, ?, ?, ?)") + if err != nil { + return err + } + defer insertStmt.Close() + + for i := 0; i < len(oas); i++ { + _, err := insertStmt.ExecContext(ctx, oas[i].Address[:], oas[i].UpdateRound, oas[i].NormalizedOnlineBalance, oas[i].VoteLastValid, oas[i].Data) + if err != nil { + return err + } + } + return nil +} + +// WriteCatchpointStagingOnlineRoundParams inserts all the online round params in the provided array +// into the catchpoint staging table catchpointonlineroundparamstail. +func (cw *catchpointWriter) WriteCatchpointStagingOnlineRoundParams(ctx context.Context, orps []encoded.OnlineRoundParamsRecordV6) error { + insertStmt, err := cw.e.PrepareContext(ctx, "INSERT INTO catchpointonlineroundparamstail(rnd, data) VALUES(?, ?)") + if err != nil { + return err + } + defer insertStmt.Close() + + for i := 0; i < len(orps); i++ { + _, err := insertStmt.ExecContext(ctx, orps[i].Round, orps[i].Data) + if err != nil { + return err + } + } + return nil +} + func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context, newCatchup bool) (err error) { s := []string{ "DROP TABLE IF EXISTS catchpointbalances", @@ -472,6 +509,8 @@ func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context, "DROP TABLE IF EXISTS catchpointpendinghashes", "DROP TABLE IF EXISTS catchpointresources", "DROP TABLE IF EXISTS catchpointkvstore", + "DROP TABLE IF EXISTS catchpointonlineaccounts", + "DROP TABLE IF EXISTS catchpointonlineroundparamstail", "DROP TABLE IF EXISTS catchpointstateproofverification", "DELETE FROM accounttotals where id='catchpointStaging'", } @@ -486,6 +525,7 @@ func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context, now := time.Now().UnixNano() idxnameBalances =
fmt.Sprintf("onlineaccountbals_idx_%d", now) idxnameAddress := fmt.Sprintf("accountbase_address_idx_%d", now) + idxnameOnlineAccounts := fmt.Sprintf("onlineaccountnorm_idx_%d", now) s = append(s, "CREATE TABLE IF NOT EXISTS catchpointassetcreators (asset integer primary key, creator blob, ctype integer)", @@ -494,10 +534,13 @@ func (cw *catchpointWriter) ResetCatchpointStagingBalances(ctx context.Context, "CREATE TABLE IF NOT EXISTS catchpointaccounthashes (id integer primary key, data blob)", "CREATE TABLE IF NOT EXISTS catchpointresources (addrid INTEGER NOT NULL, aidx INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY (addrid, aidx) ) WITHOUT ROWID", "CREATE TABLE IF NOT EXISTS catchpointkvstore (key blob primary key, value blob)", + "CREATE TABLE IF NOT EXISTS catchpointonlineaccounts (address BLOB NOT NULL, updround INTEGER NOT NULL, normalizedonlinebalance INTEGER NOT NULL, votelastvalid INTEGER NOT NULL, data BLOB NOT NULL, PRIMARY KEY (address, updround) )", + "CREATE TABLE IF NOT EXISTS catchpointonlineroundparamstail(rnd INTEGER NOT NULL PRIMARY KEY, data BLOB NOT NULL)", "CREATE TABLE IF NOT EXISTS catchpointstateproofverification (lastattestedround INTEGER PRIMARY KEY NOT NULL, verificationContext BLOB NOT NULL)", createNormalizedOnlineBalanceIndex(idxnameBalances, "catchpointbalances"), // should this be removed ? createUniqueAddressBalanceIndex(idxnameAddress, "catchpointbalances"), + createNormalizedOnlineBalanceIndexOnline(idxnameOnlineAccounts, "catchpointonlineaccounts"), ) } @@ -550,6 +593,25 @@ func (cw *catchpointWriter) ApplyCatchpointStagingBalances(ctx context.Context, return } +// ApplyCatchpointStagingTablesV7 drops the existing onlineaccounts and onlineroundparamstail tables, +// replacing them with data from the catchpoint staging tables. It should only be used for CatchpointFileVersionV8, +// after the ApplyCatchpointStagingBalances function has been run on DB v6, then upgraded to DB v7. +func (cw *catchpointWriter) ApplyCatchpointStagingTablesV7(ctx context.Context) (err error) { + stmts := []string{ + "DROP TABLE IF EXISTS onlineaccounts", + "DROP TABLE IF EXISTS onlineroundparamstail", + "ALTER TABLE catchpointonlineaccounts RENAME TO onlineaccounts", + "ALTER TABLE catchpointonlineroundparamstail RENAME TO onlineroundparamstail", + } + for _, stmt := range stmts { + _, err = cw.e.Exec(stmt) + if err != nil { + return err + } + } + return +} + // CreateCatchpointStagingHashesIndex creates an index on catchpointpendinghashes to allow faster scanning according to the hash order func (cw *catchpointWriter) CreateCatchpointStagingHashesIndex(ctx context.Context) (err error) { _, err = cw.e.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS catchpointpendinghashesidx ON catchpointpendinghashes(data)") diff --git a/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go b/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go index 7d2829d3e2..ea2dd5e2d8 100644 --- a/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go +++ b/ledger/store/trackerdb/sqlitedriver/encodedAccountsIter.go @@ -45,8 +45,8 @@ type catchpointAccountResourceCounter struct { totalAssets uint64 } -// MakeEncodedAccoutsBatchIter creates an empty accounts batch iterator. -func MakeEncodedAccoutsBatchIter(q db.Queryable) *encodedAccountsBatchIter { +// MakeEncodedAccountsBatchIter creates an empty accounts batch iterator. 
+func MakeEncodedAccountsBatchIter(q db.Queryable) *encodedAccountsBatchIter { return &encodedAccountsBatchIter{q: q} } diff --git a/ledger/store/trackerdb/sqlitedriver/kvsIter.go b/ledger/store/trackerdb/sqlitedriver/kvsIter.go index 4ae08962a0..b85d6c48cb 100644 --- a/ledger/store/trackerdb/sqlitedriver/kvsIter.go +++ b/ledger/store/trackerdb/sqlitedriver/kvsIter.go @@ -19,7 +19,13 @@ package sqlitedriver import ( "context" "database/sql" + "fmt" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/encoded" + "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/ledger/store/trackerdb" + "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" ) @@ -53,3 +59,111 @@ func (iter *kvsIter) KeyValue() (k []byte, v []byte, err error) { func (iter *kvsIter) Close() { iter.rows.Close() } + +// tableIterator is used to dump onlineaccounts and onlineroundparams tables for catchpoints. +type tableIterator[T any] struct { + rows *sql.Rows + scan func(*sql.Rows) (T, error) +} + +func (iter *tableIterator[T]) Next() bool { return iter.rows.Next() } +func (iter *tableIterator[T]) Close() { iter.rows.Close() } +func (iter *tableIterator[T]) GetItem() (T, error) { + return iter.scan(iter.rows) +} + +// MakeOnlineAccountsIter creates an onlineAccounts iterator. +func MakeOnlineAccountsIter(ctx context.Context, q db.Queryable) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + rows, err := q.QueryContext(ctx, "SELECT address, updround, normalizedonlinebalance, votelastvalid, data FROM onlineaccounts ORDER BY address, updround") + if err != nil { + return nil, err + } + + return &tableIterator[*encoded.OnlineAccountRecordV6]{rows: rows, scan: scanOnlineAccount}, nil +} + +func scanOnlineAccount(rows *sql.Rows) (*encoded.OnlineAccountRecordV6, error) { + var ret encoded.OnlineAccountRecordV6 + var updRound, normBal, lastValid sql.NullInt64 + var addr, data []byte + + err := rows.Scan(&addr, &updRound, &normBal, &lastValid, &data) + if err != nil { + return nil, err + } + if len(addr) != len(ret.Address) { + err = fmt.Errorf("onlineaccounts DB address length mismatch: %d != %d", len(addr), len(ret.Address)) + return nil, err + } + copy(ret.Address[:], addr) + + if !updRound.Valid || updRound.Int64 < 0 { + return nil, fmt.Errorf("invalid updateRound (%v) for online account %s", updRound, ret.Address.String()) + } + ret.UpdateRound = basics.Round(updRound.Int64) + + if !normBal.Valid || normBal.Int64 < 0 { + return nil, fmt.Errorf("invalid norm balance (%v) for online account %s", normBal, ret.Address.String()) + } + ret.NormalizedOnlineBalance = uint64(normBal.Int64) + + if !lastValid.Valid || lastValid.Int64 < 0 { + return nil, fmt.Errorf("invalid lastValid (%v) for online account %s", lastValid, ret.Address) + } + ret.VoteLastValid = basics.Round(lastValid.Int64) + + var oaData trackerdb.BaseOnlineAccountData + err = protocol.Decode(data, &oaData) + if err != nil { + return nil, fmt.Errorf("encoding error for online account %s: %v", ret.Address, err) + } + + // check consistency of the decoded data against row data + // skip checking NormalizedOnlineBalance, requires proto + if ret.VoteLastValid != oaData.VoteLastValid { + return nil, fmt.Errorf("decoded voteLastValid %d does not match row voteLastValid %d", oaData.VoteLastValid, ret.VoteLastValid) + } + + // return original encoded column value + ret.Data = data + + return &ret, nil +} + +// MakeOnlineRoundParamsIter creates an 
onlineRoundParams iterator. +func MakeOnlineRoundParamsIter(ctx context.Context, q db.Queryable) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + rows, err := q.QueryContext(ctx, "SELECT rnd, data FROM onlineroundparamstail ORDER BY rnd") + if err != nil { + return nil, err + } + + return &tableIterator[*encoded.OnlineRoundParamsRecordV6]{rows: rows, scan: scanOnlineRoundParams}, nil +} + +func scanOnlineRoundParams(rows *sql.Rows) (*encoded.OnlineRoundParamsRecordV6, error) { + var ret encoded.OnlineRoundParamsRecordV6 + var rnd sql.NullInt64 + var data []byte + + err := rows.Scan(&rnd, &data) + if err != nil { + return nil, err + } + + if !rnd.Valid || rnd.Int64 < 0 { + return nil, fmt.Errorf("invalid round (%v) for online round params", rnd) + } + ret.Round = basics.Round(rnd.Int64) + + // test decode + var orpData ledgercore.OnlineRoundParamsData + err = protocol.Decode(data, &orpData) + if err != nil { + return nil, fmt.Errorf("encoding error for online round params round %v: %v", ret.Round, err) + } + + // return original encoded column value + ret.Data = data + + return &ret, nil +} diff --git a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go index 54a080fe94..d29fdb9abc 100644 --- a/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go +++ b/ledger/store/trackerdb/sqlitedriver/sqlitedriver.go @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/ledger/store/trackerdb" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" @@ -200,9 +201,9 @@ func (r *sqlReader) MakeCatchpointReader() (trackerdb.CatchpointReader, error) { return makeCatchpointReader(r.q), nil } -// MakeEncodedAccoutsBatchIter implements trackerdb.Reader -func (r *sqlReader) MakeEncodedAccoutsBatchIter() trackerdb.EncodedAccountsBatchIter { - return MakeEncodedAccoutsBatchIter(r.q) +// MakeEncodedAccountsBatchIter implements trackerdb.Reader +func (r *sqlReader) MakeEncodedAccountsBatchIter() trackerdb.EncodedAccountsBatchIter { + return MakeEncodedAccountsBatchIter(r.q) } // MakeKVsIter implements trackerdb.Reader @@ -210,6 +211,16 @@ func (r *sqlReader) MakeKVsIter(ctx context.Context) (trackerdb.KVsIter, error) return MakeKVsIter(ctx, r.q) } +// MakeOnlineAccountsIter implements trackerdb.Reader +func (r *sqlReader) MakeOnlineAccountsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineAccountRecordV6], error) { + return MakeOnlineAccountsIter(ctx, r.q) +} + +// MakeOnlineRoundParamsIter implements trackerdb.Reader +func (r *sqlReader) MakeOnlineRoundParamsIter(ctx context.Context) (trackerdb.TableIterator[*encoded.OnlineRoundParamsRecordV6], error) { + return MakeOnlineRoundParamsIter(ctx, r.q) +} + type sqlWriter struct { e db.Executable } diff --git a/ledger/store/trackerdb/store.go b/ledger/store/trackerdb/store.go index 257ff63842..968c313a81 100644 --- a/ledger/store/trackerdb/store.go +++ b/ledger/store/trackerdb/store.go @@ -20,6 +20,7 @@ import ( "context" "time" + "github.com/algorand/go-algorand/ledger/encoded" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/util/db" ) @@ -63,8 +64,10 @@ type Reader interface { MakeCatchpointPendingHashesIterator(hashCount int) CatchpointPendingHashesIter // Note: Catchpoint tracker needs this on the reader handle in sqlite to not get locked by write txns MakeCatchpointReader() 
(CatchpointReader, error) - MakeEncodedAccoutsBatchIter() EncodedAccountsBatchIter + MakeEncodedAccountsBatchIter() EncodedAccountsBatchIter MakeKVsIter(ctx context.Context) (KVsIter, error) + MakeOnlineAccountsIter(ctx context.Context) (TableIterator[*encoded.OnlineAccountRecordV6], error) + MakeOnlineRoundParamsIter(ctx context.Context) (TableIterator[*encoded.OnlineRoundParamsRecordV6], error) } // Writer is the interface for the trackerdb write operations. diff --git a/ledger/tracker_test.go b/ledger/tracker_test.go index 3c7bf51faa..324c658591 100644 --- a/ledger/tracker_test.go +++ b/ledger/tracker_test.go @@ -188,9 +188,6 @@ func (t *emptyTracker) postCommit(ctx context.Context, dcc *deferredCommitContex func (t *emptyTracker) postCommitUnlocked(ctx context.Context, dcc *deferredCommitContext) { } -func (t *emptyTracker) clearCommitRoundRetry(ctx context.Context, dcc *deferredCommitContext) { -} - // control functions are not used by the emptyTracker func (t *emptyTracker) handleUnorderedCommit(dcc *deferredCommitContext) { } @@ -198,6 +195,8 @@ func (t *emptyTracker) handlePrepareCommitError(dcc *deferredCommitContext) { } func (t *emptyTracker) handleCommitError(dcc *deferredCommitContext) { } +func (t *emptyTracker) clearCommitRoundRetry(ctx context.Context, dcc *deferredCommitContext) { +} // close is not used by the emptyTracker func (t *emptyTracker) close() { diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go index 3f17c1bf85..1363646043 100644 --- a/logging/telemetryspec/event.go +++ b/logging/telemetryspec/event.go @@ -322,8 +322,8 @@ type CatchpointGenerationEventDetails struct { BalancesWriteTime uint64 // AccountsCount is the number of accounts that were written into the generated catchpoint file AccountsCount uint64 - // KVsCount is the number of accounts that were written into the generated catchpoint file - KVsCount uint64 + // KVsCount, OnlineAccountsCount, OnlineRoundParamsCount are the counts of KVs, online accounts, and online round params records written into the generated catchpoint file + KVsCount, OnlineAccountsCount, OnlineRoundParamsCount uint64 // FileSize is the size of the catchpoint file, in bytes.
FileSize uint64 // MerkleTrieRootHash is the merkle trie root hash represents all accounts and kvs diff --git a/protocol/hash.go b/protocol/hash.go index 906afb2c3d..9390c582c2 100644 --- a/protocol/hash.go +++ b/protocol/hash.go @@ -52,6 +52,8 @@ const ( NetIdentityChallengeResponse HashID = "NIR" NetIdentityVerificationMessage HashID = "NIV" NetPrioResponse HashID = "NPR" + OnlineAccount HashID = "OA" + OnlineRoundParams HashID = "ORP" OneTimeSigKey1 HashID = "OT1" OneTimeSigKey2 HashID = "OT2" PaysetFlat HashID = "PF" diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go index 0a1d522cac..e7408ed55a 100644 --- a/test/e2e-go/features/catchup/catchpointCatchup_test.go +++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go @@ -294,7 +294,7 @@ func TestCatchpointCatchupFailure(t *testing.T) { t.Skip() } - consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&consensusParams) a := require.New(fixtures.SynchronizedTest(t)) @@ -339,7 +339,7 @@ func TestBasicCatchpointCatchup(t *testing.T) { t.Skip() } - consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&consensusParams) a := require.New(fixtures.SynchronizedTest(t)) @@ -397,7 +397,7 @@ func TestCatchpointLabelGeneration(t *testing.T) { consensus := make(config.ConsensusProtocols) const consensusCatchpointCatchupTestProtocol = protocol.ConsensusVersion("catchpointtestingprotocol") - catchpointCatchupProtocol := config.Consensus[protocol.ConsensusCurrentVersion] + catchpointCatchupProtocol := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&catchpointCatchupProtocol) consensus[consensusCatchpointCatchupTestProtocol] = catchpointCatchupProtocol @@ -474,7 +474,7 @@ func TestNodeTxHandlerRestart(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) - protoVersion := protocol.ConsensusCurrentVersion + protoVersion := protocol.ConsensusFuture catchpointCatchupProtocol := config.Consensus[protoVersion] applyCatchpointConsensusChanges(&catchpointCatchupProtocol) catchpointCatchupProtocol.StateProofInterval = 0 @@ -581,7 +581,7 @@ func TestReadyEndpoint(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) - protoVersion := protocol.ConsensusCurrentVersion + protoVersion := protocol.ConsensusFuture catchpointCatchupProtocol := config.Consensus[protoVersion] applyCatchpointConsensusChanges(&catchpointCatchupProtocol) catchpointCatchupProtocol.StateProofInterval = 0 @@ -720,7 +720,7 @@ func TestNodeTxSyncRestart(t *testing.T) { a := require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) - protoVersion := protocol.ConsensusCurrentVersion + protoVersion := protocol.ConsensusFuture catchpointCatchupProtocol := config.Consensus[protoVersion] prevMaxTxnLife := catchpointCatchupProtocol.MaxTxnLife applyCatchpointConsensusChanges(&catchpointCatchupProtocol) diff --git a/test/e2e-go/features/catchup/stateproofsCatchup_test.go b/test/e2e-go/features/catchup/stateproofsCatchup_test.go index f9639abeb1..69c50f5ccd 100644 --- a/test/e2e-go/features/catchup/stateproofsCatchup_test.go +++ b/test/e2e-go/features/catchup/stateproofsCatchup_test.go @@ -70,7 +70,7 @@ func TestStateProofInReplayCatchpoint(t 
*testing.T) { a := require.New(fixtures.SynchronizedTest(t)) - consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&consensusParams) applyCatchpointStateProofConsensusChanges(&consensusParams) @@ -146,7 +146,7 @@ func TestStateProofAfterCatchpoint(t *testing.T) { } a := require.New(fixtures.SynchronizedTest(t)) - consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointConsensusChanges(&consensusParams) applyCatchpointStateProofConsensusChanges(&consensusParams) consensusParams.StateProofInterval = 16 @@ -211,7 +211,7 @@ func TestSendSigsAfterCatchpointCatchup(t *testing.T) { configurableConsensus := make(config.ConsensusProtocols) consensusVersion := protocol.ConsensusVersion("catchpointtestingprotocol") - consensusParams := config.Consensus[protocol.ConsensusCurrentVersion] + consensusParams := config.Consensus[protocol.ConsensusFuture] applyCatchpointStateProofConsensusChanges(&consensusParams) applyCatchpointConsensusChanges(&consensusParams) // Weight threshold allows creation of state proofs using the primary node and at least one other node.
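
Note on the new accountsV2Reader row-count helpers: a minimal sketch, not part of this patch, of how TotalOnlineAccountRows and TotalOnlineRoundParams could feed the new TotalOnlineAccounts / TotalOnlineRoundParams counters in CatchpointFileHeader. The onlineCounts helper name is purely illustrative.

package sqlitedriver

import "context"

// onlineCounts is an illustrative helper (not part of this PR) that gathers the
// two new table row counts in one call, in the order the file header expects them.
func onlineCounts(ctx context.Context, ar *accountsV2Reader) (onlineAccounts, onlineRoundParams uint64, err error) {
	onlineAccounts, err = ar.TotalOnlineAccountRows(ctx)
	if err != nil {
		return
	}
	onlineRoundParams, err = ar.TotalOnlineRoundParams(ctx)
	return
}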
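Note on the new staging writers in catchpoint.go: a minimal sketch, assuming a decoded catchpoint chunk exposes its online-account and online-round-params records as plain slices, of how the catchup side might push one chunk into the staging tables. writeStagingOnlineChunks is illustrative only and is not part of this patch.

package sqlitedriver

import (
	"context"

	"github.com/algorand/go-algorand/ledger/encoded"
)

// writeStagingOnlineChunks writes one chunk's worth of online-account and
// online-round-params records into the catchpointonlineaccounts and
// catchpointonlineroundparamstail staging tables via the new writer methods.
func writeStagingOnlineChunks(ctx context.Context, cw *catchpointWriter,
	oas []encoded.OnlineAccountRecordV6, orps []encoded.OnlineRoundParamsRecordV6) error {
	if err := cw.WriteCatchpointStagingOnlineAccounts(ctx, oas); err != nil {
		return err
	}
	return cw.WriteCatchpointStagingOnlineRoundParams(ctx, orps)
}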
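Note on the generic tableIterator added in kvsIter.go: a minimal sketch of draining MakeOnlineAccountsIter through the TableIterator interface (Next / GetItem / Close). A production catchpoint writer would emit records in bounded chunks rather than accumulating the whole table in memory; collectOnlineAccounts is illustrative only.

package sqlitedriver

import (
	"context"

	"github.com/algorand/go-algorand/ledger/encoded"
	"github.com/algorand/go-algorand/util/db"
)

// collectOnlineAccounts drains the onlineaccounts iterator into a slice,
// surfacing any per-row scan or decode error from GetItem.
func collectOnlineAccounts(ctx context.Context, q db.Queryable) ([]*encoded.OnlineAccountRecordV6, error) {
	it, err := MakeOnlineAccountsIter(ctx, q)
	if err != nil {
		return nil, err
	}
	defer it.Close()

	var records []*encoded.OnlineAccountRecordV6
	for it.Next() {
		rec, err := it.GetItem()
		if err != nil {
			return nil, err
		}
		records = append(records, rec)
	}
	return records, nil
}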
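Note on the new OnlineAccount ("OA") and OnlineRoundParams ("ORP") hash domains: one plausible use, shown only to illustrate how the prefixes keep these digests separate from every other HashID use. The actual construction of OnlineAccountsHash and OnlineRoundParamsHash is defined by the catchpoint writer in this PR, not by this sketch.

package ledger

import (
	"github.com/algorand/go-algorand/crypto"
	"github.com/algorand/go-algorand/protocol"
)

// hashOnlineAccountMsg is an illustrative, domain-separated hash of an
// already msgpack-encoded online account record: the "OA" prefix is
// prepended before hashing so the digest cannot collide with hashes
// computed under any other protocol.HashID.
func hashOnlineAccountMsg(encodedRecord []byte) crypto.Digest {
	return crypto.Hash(append([]byte(protocol.OnlineAccount), encodedRecord...))
}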