From fd5c8d78123234a64bc97497322463bcc0162463 Mon Sep 17 00:00:00 2001
From: Ge Gao <106119108+gegaowp@users.noreply.github.com>
Date: Mon, 6 Jan 2025 16:16:00 +0700
Subject: [PATCH] snapshot reader: parallel checksum for speed (#20721)

## Description

Parallelize checksumming in the snapshot reader. Instead of hashing each
(bucket, partition) ref file sequentially, the checksum pass now fans the work
out over up to `self.concurrency` spawned tasks via `try_for_each_spawned`.
Before this change a local checksum run took about 30 minutes; with it, the
same run takes about 4 minutes. A minimal sketch of the bounded-concurrency
pattern appears after the diff below.

## Test plan

CI, plus a local run.

Before:

```
[00:33:57] 726 out of 726 ref files checksummed (Checksumming complete)
```

After:

```
[00:04:19] 726 out of 726 ref files checksummed (Checksumming complete)
```

---

## Release notes

Check each box that your changes affect. If none of the boxes relate to your
changes, release notes aren't required.

For each box you select, include information after the relevant heading that
describes the impact of your changes that a user might notice and any actions
they must take to implement updates.

- [ ] Protocol:
- [ ] Nodes (Validators and Full nodes):
- [ ] gRPC:
- [ ] JSON-RPC:
- [ ] GraphQL:
- [ ] CLI:
- [ ] Rust SDK:
---
 Cargo.lock                        |  1 +
 crates/sui-snapshot/Cargo.toml    |  1 +
 crates/sui-snapshot/src/reader.rs | 72 ++++++++++++++++++++-----------
 3 files changed, 49 insertions(+), 25 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index dbe849f12cf2d..2e200bd7003ab 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -15218,6 +15218,7 @@ dependencies = [
  "serde_json",
  "sui-config",
  "sui-core",
+ "sui-indexer-alt-framework",
  "sui-protocol-config",
  "sui-storage",
  "sui-types",
diff --git a/crates/sui-snapshot/Cargo.toml b/crates/sui-snapshot/Cargo.toml
index cdc3db01304bc..f223db60dd5a0 100644
--- a/crates/sui-snapshot/Cargo.toml
+++ b/crates/sui-snapshot/Cargo.toml
@@ -25,6 +25,7 @@ prometheus.workspace = true
 sui-types.workspace = true
 sui-config.workspace = true
 sui-core.workspace = true
+sui-indexer-alt-framework.workspace = true
 sui-storage.workspace = true
 sui-protocol-config.workspace = true
 fastcrypto = { workspace = true, features = ["copy_key"] }
diff --git a/crates/sui-snapshot/src/reader.rs b/crates/sui-snapshot/src/reader.rs
index befe6a857a42d..9119b8a336907 100644
--- a/crates/sui-snapshot/src/reader.rs
+++ b/crates/sui-snapshot/src/reader.rs
@@ -26,6 +26,7 @@ use std::sync::Arc;
 use sui_config::object_storage_config::ObjectStoreConfig;
 use sui_core::authority::authority_store_tables::{AuthorityPerpetualTables, LiveObject};
 use sui_core::authority::AuthorityStore;
+use sui_indexer_alt_framework::task::TrySpawnStreamExt;
 use sui_storage::blob::{Blob, BlobEncoding};
 use sui_storage::object_store::http::HttpDownloaderBuilder;
 use sui_storage::object_store::util::{copy_file, copy_files, path_to_filesystem};
@@ -41,6 +42,7 @@ use tracing::{error, info};
 pub type SnapshotChecksums = (DigestByBucketAndPartition, Accumulator);
 pub type DigestByBucketAndPartition = BTreeMap<u32, BTreeMap<u32, [u8; 32]>>;
 pub type Sha3DigestType = Arc<Mutex<BTreeMap<u32, BTreeMap<u32, [u8; 32]>>>>;
+#[derive(Clone)]
 pub struct StateSnapshotReaderV1 {
     epoch: u64,
     local_staging_dir_root: PathBuf,
@@ -235,32 +237,52 @@ impl StateSnapshotReaderV1 {
                 .unwrap(),
             ),
         );
-        for (bucket, part_files) in self.ref_files.clone().iter() {
-            for (part, _part_file) in part_files.iter() {
-                let mut sha3_digests = sha3_digests.lock().await;
-                let ref_iter = self.ref_iter(*bucket, *part)?;
-                let mut hasher = Sha3_256::default();
-                let mut empty = true;
-                self.object_files
-                    .get(bucket)
-                    .context(format!("No bucket exists for: {bucket}"))?
-                    .get(part)
-                    .context(format!("No part exists for bucket: {bucket}, part: {part}"))?;
-                for object_ref in ref_iter {
-                    hasher.update(object_ref.2.inner());
-                    empty = false;
-                }
-                if !empty {
-                    sha3_digests
-                        .entry(*bucket)
-                        .or_insert(BTreeMap::new())
-                        .entry(*part)
-                        .or_insert(hasher.finalize().digest);
+        let ref_files_iter = self.ref_files.clone().into_iter();
+        futures::stream::iter(ref_files_iter)
+            .flat_map(|(bucket, part_files)| {
+                futures::stream::iter(
+                    part_files
+                        .into_iter()
+                        .map(move |(part, part_file)| (bucket, part, part_file)),
+                )
+            })
+            .try_for_each_spawned(self.concurrency, |(bucket, part, _part_file)| {
+                let sha3_digests = sha3_digests.clone();
+                let object_files = self.object_files.clone();
+                let bar = checksum_progress_bar.clone();
+                let this = self.clone();
+
+                async move {
+                    let ref_iter = this.ref_iter(bucket, part)?;
+                    let mut hasher = Sha3_256::default();
+                    let mut empty = true;
+
+                    object_files
+                        .get(&bucket)
+                        .context(format!("No bucket exists for: {bucket}"))?
+                        .get(&part)
+                        .context(format!("No part exists for bucket: {bucket}, part: {part}"))?;
+
+                    for object_ref in ref_iter {
+                        hasher.update(object_ref.2.inner());
+                        empty = false;
+                    }
+
+                    if !empty {
+                        let mut digests = sha3_digests.lock().await;
+                        digests
+                            .entry(bucket)
+                            .or_insert(BTreeMap::new())
+                            .entry(part)
+                            .or_insert(hasher.finalize().digest);
+                    }
+
+                    bar.inc(1);
+                    bar.set_message(format!("Bucket: {}, Part: {}", bucket, part));
+                    Ok::<(), anyhow::Error>(())
                 }
-                checksum_progress_bar.inc(1);
-                checksum_progress_bar.set_message(format!("Bucket: {}, Part: {}", bucket, part));
-            }
-        }
+            })
+            .await?;
         checksum_progress_bar.finish_with_message("Checksumming complete");
         Ok((sha3_digests, num_part_files))
     }
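
For readers skimming the change, here is a minimal, self-contained sketch of the bounded-concurrency pattern the patch uses, written against plain `anyhow`, `fastcrypto`, `futures`, and `tokio` rather than the sui crates. It substitutes `futures::TryStreamExt::try_for_each_concurrent` plus `tokio::task::spawn_blocking` for the `TrySpawnStreamExt::try_for_each_spawned` helper that the patch pulls in from `sui-indexer-alt-framework`, and `Part` / `checksum_part` are hypothetical stand-ins for the reader's ref-file iteration, not real APIs:

```rust
// Sketch only: bounded-concurrency checksumming over (bucket, part) pairs.
// `Part` and `checksum_part` are illustrative stand-ins; the fan-out/collect
// shape mirrors the patch, not its exact helper.
use std::collections::BTreeMap;
use std::sync::Arc;

use anyhow::Result;
use fastcrypto::hash::{HashFunction, Sha3_256};
use futures::{stream, StreamExt, TryStreamExt};
use tokio::sync::Mutex;

#[derive(Clone, Copy)]
struct Part {
    bucket: u32,
    part: u32,
}

/// Hash everything belonging to one (bucket, part) pair. The real reader
/// feeds each object reference digest from `ref_iter(bucket, part)` into the
/// hasher; hashing the ids here is a placeholder for that work.
fn checksum_part(part: Part) -> [u8; 32] {
    let mut hasher = Sha3_256::default();
    hasher.update(part.bucket.to_le_bytes());
    hasher.update(part.part.to_le_bytes());
    hasher.finalize().digest
}

async fn checksum_all(
    parts: Vec<Part>,
    concurrency: usize,
) -> Result<BTreeMap<u32, BTreeMap<u32, [u8; 32]>>> {
    // Shared digest map, same shape as the reader's `sha3_digests`.
    let digests = Arc::new(Mutex::new(BTreeMap::new()));

    stream::iter(parts)
        .map(Ok)
        .try_for_each_concurrent(concurrency, |part| {
            let digests = digests.clone();
            async move {
                // The hashing is CPU-bound, so push it onto a blocking thread
                // instead of stalling the async executor.
                let digest = tokio::task::spawn_blocking(move || checksum_part(part)).await?;
                digests
                    .lock()
                    .await
                    .entry(part.bucket)
                    .or_insert_with(BTreeMap::new)
                    .insert(part.part, digest);
                Ok::<(), anyhow::Error>(())
            }
        })
        .await?;

    // All workers have finished, so the Arc has a single owner again.
    Ok(Arc::try_unwrap(digests)
        .expect("checksum tasks still running")
        .into_inner())
}

#[tokio::main]
async fn main() -> Result<()> {
    let parts: Vec<Part> = (0..4u32)
        .flat_map(|bucket| (0..8u32).map(move |part| Part { bucket, part }))
        .collect();
    let digests = checksum_all(parts, 4).await?;
    println!("checksummed {} buckets", digests.len());
    Ok(())
}
```

The `concurrency` argument caps how many checksum futures are in flight, analogous to `try_for_each_spawned(self.concurrency, ...)` bounding spawned tasks in the patch; `spawn_blocking` keeps the CPU-bound hashing off the async executor, and the per-part digests land in one shared `BTreeMap` behind a mutex, matching the shape of the reader's `sha3_digests`.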