From 70974b7712ee382ed6db0f44dc592641b86cc144 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Mon, 9 Dec 2024 09:39:36 +0000 Subject: [PATCH] Reduce StatusCache contention. Remove the global RwLock around the status cache, and introduce more granular RwLocks per-blockhash and per-slot. Additionally, change the internal hash tables from std HashMap to Dashmap, so that operations at the blockhash and slot level can be done only holding read locks. --- Cargo.lock | 11 + Cargo.toml | 3 +- core/Cargo.toml | 1 + core/tests/snapshots.rs | 4 - programs/sbf/Cargo.lock | 9 + runtime/Cargo.toml | 2 + runtime/benches/status_cache.rs | 6 +- runtime/src/bank.rs | 29 +- runtime/src/bank/check_transactions.rs | 3 +- runtime/src/bank_forks.rs | 3 +- runtime/src/snapshot_bank_utils.rs | 8 +- runtime/src/status_cache.rs | 653 +++++++++++++++++-------- sdk/frozen-abi/Cargo.toml | 2 + sdk/frozen-abi/src/abi_example.rs | 26 + svm/examples/Cargo.lock | 9 + 15 files changed, 539 insertions(+), 230 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ecc65cdbe1faf7..d5a49c0515eeb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1066,6 +1066,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "boxcar" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f839cdf7e2d3198ac6ca003fd8ebc61715755f41c1cad15ff13df67531e00ed" + [[package]] name = "brotli" version = "3.3.4" @@ -1866,6 +1872,7 @@ dependencies = [ "once_cell", "parking_lot_core 0.9.8", "rayon", + "serde", ] [[package]] @@ -7095,8 +7102,10 @@ name = "solana-frozen-abi" version = "2.2.0" dependencies = [ "bitflags 2.6.0", + "boxcar", "bs58", "bv", + "dashmap", "im", "log", "memmap2", @@ -8472,6 +8481,7 @@ dependencies = [ "base64 0.22.1", "bincode", "blake3", + "boxcar", "bv", "bytemuck", "byteorder", @@ -8509,6 +8519,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_with", + "smallvec", "solana-accounts-db", "solana-address-lookup-table-program", 
"solana-bpf-loader-program", diff --git a/Cargo.toml b/Cargo.toml index 21684e6126f9d1..057ec337c691d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -270,6 +270,7 @@ bitflags = { version = "2.6.0" } blake3 = "1.5.5" borsh = { version = "1.5.3", features = ["derive", "unstable__schema"] } borsh0-10 = { package = "borsh", version = "0.10.3" } +boxcar = "0.2.7" bs58 = { version = "0.5.1", default-features = false } bv = "0.11.1" byte-unit = "4.0.19" @@ -296,7 +297,7 @@ crossbeam-channel = "0.5.13" csv = "1.3.1" ctrlc = "3.4.5" curve25519-dalek = { version = "4.1.3", features = ["digest", "rand_core"] } -dashmap = "5.5.3" +dashmap = { version = "5.5.3", features = ["serde"] } derivation-path = { version = "0.2.0", default-features = false } derive-where = "1.2.7" dialoguer = "0.10.4" diff --git a/core/Cargo.toml b/core/Cargo.toml index 8f400b02c108ea..af24540eefa2b2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -114,6 +114,7 @@ solana-logger = { workspace = true } solana-net-utils = { workspace = true, features = ["dev-context-only-utils"] } solana-poh = { workspace = true, features = ["dev-context-only-utils"] } solana-program-runtime = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-stake-program = { workspace = true } solana-unified-scheduler-pool = { workspace = true, features = [ diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 11855f60acf819..e2fb7fd2bb0c64 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -363,11 +363,7 @@ fn test_slots_to_snapshot(snapshot_version: SnapshotVersion, cluster_type: Clust .unwrap() .root_bank() .status_cache - .read() - .unwrap() .roots() - .iter() - .cloned() .sorted(); assert!(slots_to_snapshot.into_iter().eq(expected_slots_to_snapshot)); } diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index ab6585e21f4c92..cff2a81f8e50a3 100644 --- 
a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -792,6 +792,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "boxcar" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f839cdf7e2d3198ac6ca003fd8ebc61715755f41c1cad15ff13df67531e00ed" + [[package]] name = "brotli" version = "3.3.4" @@ -1322,6 +1328,7 @@ dependencies = [ "once_cell", "parking_lot_core 0.9.8", "rayon", + "serde", ] [[package]] @@ -6679,6 +6686,7 @@ dependencies = [ "base64 0.22.1", "bincode", "blake3", + "boxcar", "bv", "bytemuck", "byteorder 1.5.0", @@ -6711,6 +6719,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_with", + "smallvec", "solana-accounts-db", "solana-address-lookup-table-program", "solana-bpf-loader-program", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index b7eefcea5a76b2..c3030abccb0188 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -16,6 +16,7 @@ arrayref = { workspace = true } base64 = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } +boxcar = { workspace = true } bv = { workspace = true, features = ["serde"] } bytemuck = { workspace = true } byteorder = { workspace = true } @@ -48,6 +49,7 @@ serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } serde_json = { workspace = true } serde_with = { workspace = true } +smallvec = { workspace = true } solana-accounts-db = { workspace = true } solana-address-lookup-table-program = { workspace = true } solana-bpf-loader-program = { workspace = true } diff --git a/runtime/benches/status_cache.rs b/runtime/benches/status_cache.rs index 8f25842f1febd3..8780751a7adf7a 100644 --- a/runtime/benches/status_cache.rs +++ b/runtime/benches/status_cache.rs @@ -15,7 +15,7 @@ use { #[bench] fn bench_status_cache_serialize(bencher: &mut Bencher) { - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); status_cache.add_root(0); 
status_cache.clear(); for hash_index in 0..100 { @@ -30,7 +30,7 @@ fn bench_status_cache_serialize(bencher: &mut Bencher) { status_cache.insert(&blockhash, sig, 0, Ok(())); } } - assert!(status_cache.roots().contains(&0)); + assert!(status_cache.roots().collect::>().contains(&0)); bencher.iter(|| { let _ = serialize(&status_cache.root_slot_deltas()).unwrap(); }); @@ -38,7 +38,7 @@ fn bench_status_cache_serialize(bencher: &mut Bencher) { #[bench] fn bench_status_cache_root_slot_deltas(bencher: &mut Bencher) { - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); // fill the status cache let slots: Vec<_> = (42..).take(MAX_CACHE_ENTRIES).collect(); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index a839b12e0faf10..62790e9a0252bf 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -256,7 +256,7 @@ struct RentMetrics { pub type BankStatusCache = StatusCache>; #[cfg_attr( feature = "frozen-abi", - frozen_abi(digest = "BHg4qpwegtaJypLUqAdjQYzYeLfEGf6tA4U5cREbHMHi") + frozen_abi(digest = "GsPcrVuduzJfWwBamjn4X55mJbM4iFC1uEXQrsAx5vty") )] pub type BankSlotDelta = SlotDelta>; @@ -753,7 +753,7 @@ pub struct Bank { pub rc: BankRc, /// A cache of signature statuses - pub status_cache: Arc>, + pub status_cache: Arc, /// FIFO queue of `recent_blockhash` items blockhash_queue: RwLock, @@ -1102,7 +1102,7 @@ impl Bank { let mut bank = Self { skipped_rewrites: Mutex::default(), rc: BankRc::new(accounts), - status_cache: Arc::>::default(), + status_cache: Arc::::default(), blockhash_queue: RwLock::::default(), ancestors: Ancestors::default(), hash: RwLock::::default(), @@ -1769,7 +1769,7 @@ impl Bank { let mut bank = Self { skipped_rewrites: Mutex::default(), rc: bank_rc, - status_cache: Arc::>::default(), + status_cache: Arc::::default(), blockhash_queue: RwLock::new(fields.blockhash_queue), ancestors, hash: RwLock::new(fields.hash), @@ -2046,7 +2046,7 @@ impl Bank { } pub fn status_cache_ancestors(&self) -> Vec { - 
let mut roots = self.status_cache.read().unwrap().roots().clone(); + let mut roots = self.status_cache.roots().collect::>(); let min = roots.iter().min().cloned().unwrap_or(0); for ancestor in self.ancestors.keys() { if ancestor >= min { @@ -3193,7 +3193,7 @@ impl Bank { let mut squash_cache_time = Measure::start("squash_cache_time"); roots .iter() - .for_each(|slot| self.status_cache.write().unwrap().add_root(*slot)); + .for_each(|slot| self.status_cache.add_root(*slot)); squash_cache_time.stop(); SquashTiming { @@ -3459,12 +3459,13 @@ impl Bank { } /// Forget all signatures. Useful for benchmarking. + #[cfg(feature = "dev-context-only-utils")] pub fn clear_signatures(&self) { - self.status_cache.write().unwrap().clear(); + self.status_cache.clear(); } pub fn clear_slot_signatures(&self, slot: Slot) { - self.status_cache.write().unwrap().clear_slot_entries(slot); + self.status_cache.clear_slot_entries(slot); } fn update_transaction_statuses( @@ -3472,13 +3473,12 @@ impl Bank { sanitized_txs: &[impl TransactionWithMeta], processing_results: &[TransactionProcessingResult], ) { - let mut status_cache = self.status_cache.write().unwrap(); assert_eq!(sanitized_txs.len(), processing_results.len()); for (tx, processing_result) in sanitized_txs.iter().zip(processing_results) { if let Ok(processed_tx) = &processing_result { // Add the message hash to the status cache to ensure that this message // won't be processed again with a different signature. - status_cache.insert( + self.status_cache.insert( tx.recent_blockhash(), tx.message_hash(), self.slot(), @@ -3487,7 +3487,7 @@ impl Bank { // Add the transaction signature to the status cache so that transaction status // can be queried by transaction signature over RPC. In the future, this should // only be added for API nodes because voting validators don't need to do this. 
- status_cache.insert( + self.status_cache.insert( tx.recent_blockhash(), tx.signature(), self.slot(), @@ -5635,15 +5635,14 @@ impl Bank { signature: &Signature, blockhash: &Hash, ) -> Option> { - let rcache = self.status_cache.read().unwrap(); - rcache + self.status_cache .get_status(signature, blockhash, &self.ancestors) .map(|v| v.1) } pub fn get_signature_status_slot(&self, signature: &Signature) -> Option<(Slot, Result<()>)> { - let rcache = self.status_cache.read().unwrap(); - rcache.get_status_any_blockhash(signature, &self.ancestors) + self.status_cache + .get_status_any_blockhash(signature, &self.ancestors) } pub fn get_signature_status(&self, signature: &Signature) -> Option> { diff --git a/runtime/src/bank/check_transactions.rs b/runtime/src/bank/check_transactions.rs index 6d966e32ba8931..a2bedcdd6d7041 100644 --- a/runtime/src/bank/check_transactions.rs +++ b/runtime/src/bank/check_transactions.rs @@ -191,13 +191,12 @@ impl Bank { ) -> Vec { // Do allocation before acquiring the lock on the status cache. 
let mut check_results = Vec::with_capacity(sanitized_txs.len()); - let rcache = self.status_cache.read().unwrap(); check_results.extend(sanitized_txs.iter().zip(lock_results).map( |(sanitized_tx, lock_result)| { let sanitized_tx = sanitized_tx.borrow(); if lock_result.is_ok() - && self.is_transaction_already_processed(sanitized_tx, &rcache) + && self.is_transaction_already_processed(sanitized_tx, &self.status_cache) { error_counters.already_processed += 1; return Err(TransactionError::AlreadyProcessed); diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 83c2e0ab3fd675..7237a897ca8ee6 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -452,8 +452,7 @@ impl BankForks { if bank.is_startup_verification_complete() { // Save off the status cache because these may get pruned if another // `set_root()` is called before the snapshots package can be generated - let status_cache_slot_deltas = - bank.status_cache.read().unwrap().root_slot_deltas(); + let status_cache_slot_deltas = bank.status_cache.root_slot_deltas(); if let Err(e) = accounts_background_request_sender.send_snapshot_request(SnapshotRequest { snapshot_root_bank: Arc::clone(bank), diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 4d90329a785191..10de13914c86af 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -624,7 +624,7 @@ fn rebuild_bank_from_unarchived_snapshots( verify_slot_deltas(slot_deltas.as_slice(), &bank)?; - bank.status_cache.write().unwrap().append(&slot_deltas); + bank.status_cache.append(&slot_deltas); info!("Rebuilt bank for slot: {}", bank.slot()); Ok(( @@ -686,7 +686,7 @@ fn rebuild_bank_from_snapshot( verify_slot_deltas(slot_deltas.as_slice(), &bank)?; - bank.status_cache.write().unwrap().append(&slot_deltas); + bank.status_cache.append(&slot_deltas); info!("Rebuilt bank for slot: {}", bank.slot()); Ok(( @@ -912,7 +912,7 @@ fn bank_to_full_snapshot_archive_with( 
bank.update_accounts_hash(CalcAccountsHashDataSource::Storages, false, false); let snapshot_storages = bank.get_snapshot_storages(None); - let status_cache_slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas(); + let status_cache_slot_deltas = bank.status_cache.root_slot_deltas(); let accounts_package = AccountsPackage::new_for_snapshot( AccountsPackageKind::Snapshot(SnapshotKind::FullSnapshot), bank, @@ -975,7 +975,7 @@ pub fn bank_to_incremental_snapshot_archive( bank.update_incremental_accounts_hash(full_snapshot_slot); let snapshot_storages = bank.get_snapshot_storages(Some(full_snapshot_slot)); - let status_cache_slot_deltas = bank.status_cache.read().unwrap().root_slot_deltas(); + let status_cache_slot_deltas = bank.status_cache.root_slot_deltas(); let accounts_package = AccountsPackage::new_for_snapshot( AccountsPackageKind::Snapshot(SnapshotKind::IncrementalSnapshot(full_snapshot_slot)), bank, diff --git a/runtime/src/status_cache.rs b/runtime/src/status_cache.rs index d5340b5a5c5ad1..c4d96de5f5b582 100644 --- a/runtime/src/status_cache.rs +++ b/runtime/src/status_cache.rs @@ -1,119 +1,159 @@ use { - log::*, + ahash::random_state::RandomState as AHashRandomState, + dashmap::{mapref::entry::Entry, DashMap, DashSet}, rand::{thread_rng, Rng}, - serde::Serialize, + serde::{ + de::{SeqAccess, Visitor}, + ser::SerializeSeq as _, + Deserialize, Deserializer, Serialize, + }, + smallvec::SmallVec, solana_accounts_db::ancestors::Ancestors, solana_sdk::{ clock::{Slot, MAX_RECENT_BLOCKHASHES}, hash::Hash, }, std::{ - collections::{hash_map::Entry, HashMap, HashSet}, - sync::{Arc, Mutex}, + fmt, + hash::BuildHasher, + mem::MaybeUninit, + ptr, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, }, }; +// The maximum number of entries to store in the cache. This is the same as the number of recent +// blockhashes because we automatically reject txs that use older blockhashes so we don't need to +// track those explicitly. 
pub const MAX_CACHE_ENTRIES: usize = MAX_RECENT_BLOCKHASHES; + +// Only store 20 bytes of the tx keys processed to save some memory. const CACHED_KEY_SIZE: usize = 20; -// Store forks in a single chunk of memory to avoid another lookup. -pub type ForkStatus = Vec<(Slot, T)>; +// The number of shards to use for ::slot_deltas. We're going to store at most MAX_CACHE_ENTRIES +// slots, MAX * 4 gives us a low load factor guaranteeing that collisions are very rare. +const SLOT_SHARDS: usize = (MAX_CACHE_ENTRIES * 4).next_power_of_two(); + +// MAX_CACHE_ENTRIES = MAX_RECENT_BLOCKHASHES. We only insert txs with valid blockhashes, so as +// above multiply by 4 to reduce load factor to make collisions unlikely to happen. +const BLOCKHASH_SHARDS: usize = SLOT_SHARDS; + +// The number of shards used for the maps that hold the actual tx keys. Collisions here are +// inevitable when doing high TPS. The tradeoff is between having too few shards, which would lead +// to contention due to collisions, and too many shards, which would lead to more memory usage and +// contention when first creating the maps (creation happens with write lock held). +const KEY_SHARDS: usize = 1024; + +// Store forks in a single chunk of memory to avoid another hash lookup. Avoid allocations in the +// case a tx only lands in one (the vast majority) or two forks. +pub type ForkStatus = SmallVec<[(Slot, T); 2]>; + +// The type of the key used in the cache. type KeySlice = [u8; CACHED_KEY_SIZE]; -type KeyMap = HashMap>; -// Map of Hash and status -pub type Status = Arc)>>>; -// A Map of hash + the highest fork it's been observed on along with -// the key offset and a Map of the key slice + Fork status for that key -type KeyStatusMap = HashMap)>; -// A map of keys recorded in each fork; used to serialize for snapshots easily.
-// Doesn't store a `SlotDelta` in it because the bool `root` is usually set much later -type SlotDeltaMap = HashMap>; +// A map that stores map[tx_key] => [(fork1_slot, tx_result), (fork2_slot, tx_result), ...] +type KeyMap = DashMap, AHashRandomState>; + +// The type used for StatusCache::cache. See the field definition for more details. +type KeyStatusMap = ReadOptimizedDashMap), AHashRandomState>; + +// The inner type of the StatusCache::slot_deltas map. Stores map[blockhash] => [(tx_key, tx_result), ...] +type StatusInner = DashMap), AHashRandomState>; + +// Arc wrapper around StatusInner so the accounts-db can clone the result of +// StatusCache::slot_deltas() cheaply and process it while the cache is being updated. +pub type Status = Arc>; + +// The type used for StatusCache::slot_deltas. See the field definition for more details. +type SlotDeltaMap = ReadOptimizedDashMap, AHashRandomState>; // The statuses added during a slot, can be used to build on top of a status cache or to // construct a new one. Usually derived from a status cache's `SlotDeltaMap` pub type SlotDelta = (Slot, bool, Status); #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct StatusCache { + // cache[blockhash][tx_key] => [(fork1_slot, tx_result), (fork2_slot, tx_result), ...] used to + // check if a tx_key was seen on a fork and for rpc to retrieve the tx_result cache: KeyStatusMap, - roots: HashSet, - /// all keys seen during a fork/slot + // slot_deltas[slot][blockhash] => [(tx_key, tx_result), ...] 
used to serialize for snapshots + // and to rebuild cache[blockhash][tx_key] from a snapshot slot_deltas: SlotDeltaMap, + // set of rooted slots + roots: DashSet, } impl Default for StatusCache { fn default() -> Self { Self { - cache: HashMap::default(), + cache: ReadOptimizedDashMap::new(DashMap::with_hasher_and_shard_amount( + AHashRandomState::default(), + BLOCKHASH_SHARDS, + )), + slot_deltas: ReadOptimizedDashMap::new(DashMap::with_hasher_and_shard_amount( + AHashRandomState::default(), + SLOT_SHARDS, + )), // 0 is always a root - roots: HashSet::from([0]), - slot_deltas: HashMap::default(), + roots: DashSet::from_iter([0]), } } } -impl PartialEq for StatusCache { - fn eq(&self, other: &Self) -> bool { - self.roots == other.roots - && self - .cache - .iter() - .all(|(hash, (slot, key_index, hash_map))| { - if let Some((other_slot, other_key_index, other_hash_map)) = - other.cache.get(hash) - { - if slot == other_slot && key_index == other_key_index { - return hash_map.iter().all(|(slice, fork_map)| { - if let Some(other_fork_map) = other_hash_map.get(slice) { - // all this work just to compare the highest forks in the fork map - // per entry - return fork_map.last() == other_fork_map.last(); - } - false - }); - } - } - false - }) - } -} - impl StatusCache { - pub fn clear_slot_entries(&mut self, slot: Slot) { - let slot_deltas = self.slot_deltas.remove(&slot); - if let Some(slot_deltas) = slot_deltas { - let slot_deltas = slot_deltas.lock().unwrap(); - for (blockhash, (_, key_list)) in slot_deltas.iter() { - // Any blockhash that exists in self.slot_deltas must also exist - // in self.cache, because in self.purge_roots(), when an entry - // (b, (max_slot, _, _)) is removed from self.cache, this implies - // all entries in self.slot_deltas < max_slot are also removed - if let Entry::Occupied(mut o_blockhash_entries) = self.cache.entry(*blockhash) { - let (_, _, all_hash_maps) = o_blockhash_entries.get_mut(); - - for (key_slice, _) in key_list { - if let 
Entry::Occupied(mut o_key_list) = all_hash_maps.entry(*key_slice) { - let key_list = o_key_list.get_mut(); - key_list.retain(|(updated_slot, _)| *updated_slot != slot); - if key_list.is_empty() { - o_key_list.remove_entry(); - } - } else { - panic!( - "Map for key must exist if key exists in self.slot_deltas, slot: {slot}" - ) - } - } - - if all_hash_maps.is_empty() { - o_blockhash_entries.remove_entry(); - } - } else { - panic!("Blockhash must exist if it exists in self.slot_deltas, slot: {slot}") + /// Clear all entries for a slot. + /// + /// This is used when a slot is purged from the bank, see + /// ReplayStage::purge_unconfirmed_duplicate_slot(). When this is called, it's guaranteed that + /// there are no threads inserting new entries for this slot, so there are no races. + pub fn clear_slot_entries(&self, slot: Slot) { + // remove txs seen during this slot + let Some((_, slot_deltas)) = self.slot_deltas.remove(&slot) else { + return; + }; + + // loop over all the blockhashes referenced by txs inserted in the slot + for item in slot_deltas.iter() { + // one of the blockhashes referenced + let blockhash = item.key(); + // slot_delta_txs is self.slot_deltas[slot][blockhash], ie the txs that referenced + // `blockhash` in `slot` + let (_key_index, slot_delta_txs) = item.value(); + + // any self.slot_deltas[slot][blockhash] must also exist as self.cache[blockhash] - the + // two maps store the same blockhashes just in a different layout. 
+ let Some(cache_guard) = self.cache.get(blockhash) else { + panic!("Blockhash must exist if it exists in self.slot_deltas, slot: {slot}") + }; + + // cache_txs is self.cache[blockhash] + let (_, _, cache_txs) = &*cache_guard; + + // loop over the txs in slot_delta[slot][blockhash] + for (_, (key_slice, _)) in slot_delta_txs { + // find the corresponding tx in self.cache[blockhash] + let Entry::Occupied(mut cache_tx_entry) = cache_txs.entry(*key_slice) else { + panic!("Map for key must exist if key exists in self.slot_deltas, slot: {slot}") + }; + // remove the slot from the list of forks this tx was inserted in + let forks = cache_tx_entry.get_mut(); + forks.retain(|(fork_slot, _key)| *fork_slot != slot); + if forks.is_empty() { + // if all the slots have been cleared or purged, we don't need to track this tx + // anymore + cache_tx_entry.remove(); + } + } + + // if this blockhash has no more txs, remove it from the cache + if cache_txs.is_empty() { + drop(cache_guard); + self.cache.remove(blockhash); + } } } @@ -122,40 +162,44 @@ impl StatusCache { pub fn get_status>( &self, key: K, - transaction_blockhash: &Hash, + blockhash: &Hash, ancestors: &Ancestors, ) -> Option<(Slot, T)> { - let map = self.cache.get(transaction_blockhash)?; - let (_, index, keymap) = map; + let (_, key_index, txs) = &*self.cache.get(blockhash)?; + self.do_get_status(txs, *key_index, &key, ancestors) + } + + fn do_get_status>( + &self, + txs: &KeyMap, + key_index: usize, + key: &K, + ancestors: &Ancestors, + ) -> Option<(u64, T)> { let max_key_index = key.as_ref().len().saturating_sub(CACHED_KEY_SIZE + 1); - let index = (*index).min(max_key_index); + let index = key_index.min(max_key_index); let key_slice: &[u8; CACHED_KEY_SIZE] = arrayref::array_ref![key.as_ref(), index, CACHED_KEY_SIZE]; - if let Some(stored_forks) = keymap.get(key_slice) { - let res = stored_forks + + txs.get(key_slice).and_then(|forks| { + forks .iter()
self.roots.contains(f)) - .cloned(); - if res.is_some() { - return res; - } - } - None + .find(|(slot, _)| ancestors.contains_key(slot) || self.roots.contains(slot)) + .cloned() + }) } - /// Search for a key with any blockhash - /// Prefer get_status for performance reasons, it doesn't need - /// to search all blockhashes. + /// Search for a key with any blockhash. + /// + /// Prefer get_status for performance reasons, it doesn't need to search all blockhashes. pub fn get_status_any_blockhash>( &self, key: K, ancestors: &Ancestors, ) -> Option<(Slot, T)> { - let keys: Vec<_> = self.cache.keys().copied().collect(); - - for blockhash in keys.iter() { - trace!("get_status_any_blockhash: trying {}", blockhash); - let status = self.get_status(&key, blockhash, ancestors); + for item in self.cache.iter() { + let (_, key_index, txs) = &**item.value(); + let status = self.do_get_status(txs, *key_index, &key, ancestors); if status.is_some() { return status; } @@ -163,145 +207,316 @@ impl StatusCache { None } - /// Add a known root fork. Roots are always valid ancestors. - /// After MAX_CACHE_ENTRIES, roots are removed, and any old keys are cleared. - pub fn add_root(&mut self, fork: Slot) { + /// Add a known root fork. + /// + /// Roots are always valid ancestors. After MAX_CACHE_ENTRIES, roots are removed, and any old + /// keys are cleared. + pub fn add_root(&self, fork: Slot) { self.roots.insert(fork); self.purge_roots(); } - pub fn roots(&self) -> &HashSet { - &self.roots + /// Get all the roots. + pub fn roots(&self) -> impl Iterator + '_ { + self.roots.iter().map(|x| *x) } - /// Insert a new key for a specific slot. - pub fn insert>( - &mut self, - transaction_blockhash: &Hash, - key: K, - slot: Slot, - res: T, - ) { + /// Insert a new key using the given blockhash at the given slot. 
+ pub fn insert>(&self, blockhash: &Hash, key: K, slot: Slot, res: T) { let max_key_index = key.as_ref().len().saturating_sub(CACHED_KEY_SIZE + 1); + let mut key_slice = MaybeUninit::<[u8; CACHED_KEY_SIZE]>::uninit(); // Get the cache entry for this blockhash. - let (max_slot, key_index, hash_map) = - self.cache.entry(*transaction_blockhash).or_insert_with(|| { + let key_index = { + let (max_slot, key_index, txs) = &*self.cache.get_or_insert_with(blockhash, || { let key_index = thread_rng().gen_range(0..max_key_index + 1); - (slot, key_index, HashMap::new()) + ( + AtomicU64::new(slot), + key_index, + DashMap::with_hasher_and_shard_amount(AHashRandomState::default(), KEY_SHARDS), + ) }); - // Update the max slot observed to contain txs using this blockhash. - *max_slot = std::cmp::max(slot, *max_slot); + // Update the max slot observed to contain txs using this blockhash. + max_slot.fetch_max(slot, Ordering::Relaxed); - // Grab the key slice. - let key_index = (*key_index).min(max_key_index); - let mut key_slice = [0u8; CACHED_KEY_SIZE]; - key_slice.clone_from_slice(&key.as_ref()[key_index..key_index + CACHED_KEY_SIZE]); + // Grab the key slice. + let key_index = (*key_index).min(max_key_index); + unsafe { + ptr::copy_nonoverlapping( + key.as_ref()[key_index..key_index + CACHED_KEY_SIZE].as_ptr(), + key_slice.as_mut_ptr() as *mut u8, + CACHED_KEY_SIZE, + ) + } + + // Insert the slot and tx result into the cache entry associated with + // this blockhash and keyslice. + let mut forks = txs.entry(unsafe { key_slice.assume_init() }).or_default(); + forks.push((slot, res.clone())); - // Insert the slot and tx result into the cache entry associated with - // this blockhash and keyslice. 
- let forks = hash_map.entry(key_slice).or_default(); - forks.push((slot, res.clone())); + key_index + }; - self.add_to_slot_delta(transaction_blockhash, slot, key_index, key_slice, res); + self.add_to_slot_delta( + blockhash, + slot, + key_index, + unsafe { key_slice.assume_init() }, + res, + ); } - pub fn purge_roots(&mut self) { + fn purge_roots(&self) { if self.roots.len() > MAX_CACHE_ENTRIES { - if let Some(min) = self.roots.iter().min().cloned() { + if let Some(min) = self.roots().min() { self.roots.remove(&min); - self.cache.retain(|_, (fork, _, _)| *fork > min); + self.cache.retain(|_key, value| { + let (max_slot, _, _) = &**value; + max_slot.load(Ordering::Relaxed) > min + }); self.slot_deltas.retain(|slot, _| *slot > min); } } } - /// Clear for testing - pub fn clear(&mut self) { - for v in self.cache.values_mut() { - v.2 = HashMap::new(); - } - - self.slot_deltas - .iter_mut() - .for_each(|(_, status)| status.lock().unwrap().clear()); + #[cfg(feature = "dev-context-only-utils")] + pub fn clear(&self) { + self.cache.clear(); + self.slot_deltas.clear(); } - /// Get the statuses for all the root slots + /// Get the statuses for all the root slots. + /// + /// This is never called concurrently with add_root(), and for a slot to be a root there must be + /// no new entries for that slot, so there are no races. + /// + /// See ReplayStage::handle_new_root() => BankForks::set_root() => + /// BankForks::do_set_root_return_metrics() => root_slot_deltas() pub fn root_slot_deltas(&self) -> Vec> { self.roots() - .iter() .map(|root| { ( - *root, + root, true, // <-- is_root - self.slot_deltas.get(root).cloned().unwrap_or_default(), + self.slot_deltas.get(&root).unwrap_or_default(), ) }) .collect() } - // replay deltas into a status_cache allows "appending" data - pub fn append(&mut self, slot_deltas: &[SlotDelta]) { + /// Populate the cache with the slot deltas from a snapshot. + /// + /// Really badly named method. See load_bank_forks() => ... 
=> + /// rebuild_bank_from_snapshot() => [load slot deltas from snapshot] => append() + pub fn append(&self, slot_deltas: &[SlotDelta]) { for (slot, is_root, statuses) in slot_deltas { - statuses - .lock() - .unwrap() - .iter() - .for_each(|(tx_hash, (key_index, statuses))| { - for (key_slice, res) in statuses.iter() { - self.insert_with_slice(tx_hash, *slot, *key_index, *key_slice, res.clone()) - } - }); + statuses.iter().for_each(|item| { + let tx_hash = item.key(); + let (key_index, statuses) = item.value(); + for (_, (key_slice, res)) in statuses.iter() { + self.insert_with_slice(tx_hash, *slot, *key_index, *key_slice, res.clone()) + } + }); if *is_root { self.add_root(*slot); } } } - pub fn from_slot_deltas(slot_deltas: &[SlotDelta]) -> Self { - // play all deltas back into the status cache - let mut me = Self::default(); - me.append(slot_deltas); - me - } - fn insert_with_slice( - &mut self, - transaction_blockhash: &Hash, + &self, + blockhash: &Hash, slot: Slot, key_index: usize, key_slice: [u8; CACHED_KEY_SIZE], res: T, ) { - let hash_map = - self.cache - .entry(*transaction_blockhash) - .or_insert((slot, key_index, HashMap::new())); - hash_map.0 = std::cmp::max(slot, hash_map.0); + { + let (max_slot, _, hash_map) = &*self.cache.get_or_insert_with(blockhash, || { + ( + AtomicU64::new(slot), + key_index, + DashMap::with_hasher_and_shard_amount(AHashRandomState::default(), KEY_SHARDS), + ) + }); + max_slot.fetch_max(slot, Ordering::Relaxed); - let forks = hash_map.2.entry(key_slice).or_default(); - forks.push((slot, res.clone())); + let mut forks = hash_map.entry(key_slice).or_default(); + forks.push((slot, res.clone())); + } - self.add_to_slot_delta(transaction_blockhash, slot, key_index, key_slice, res); + self.add_to_slot_delta(blockhash, slot, key_index, key_slice, res); } - // Add this key slice to the list of key slices for this slot and blockhash - // combo. + // Add this key slice to the list of key slices for this slot and blockhash combo. 
fn add_to_slot_delta( - &mut self, - transaction_blockhash: &Hash, + &self, + blockhash: &Hash, slot: Slot, key_index: usize, key_slice: [u8; CACHED_KEY_SIZE], res: T, ) { - let mut fork_entry = self.slot_deltas.entry(slot).or_default().lock().unwrap(); - let (_key_index, hash_entry) = fork_entry - .entry(*transaction_blockhash) - .or_insert((key_index, vec![])); - hash_entry.push((key_slice, res)) + let fork_entry = self.slot_deltas.get_or_insert_with(&slot, || { + DashMap::with_hasher_and_shard_amount(AHashRandomState::default(), KEY_SHARDS) + }); + + // In the vast majority of the cases, there will already be an entry for this blockhash, so + // do a get() first so that we can avoid taking a write lock on the corresponding shard. + if let Some(fork_entry) = fork_entry.get(blockhash) { + let (_key_index, txs) = &*fork_entry; + // txs is a ConcurrentVec so we can push without any locking + txs.push((key_slice, res)); + } else { + // Only take the write lock if this is the first time we've seen + // this blockhash in this slot. + let (_key_index, txs) = &mut *fork_entry + .entry(*blockhash) + .or_insert_with(|| (key_index, ConcurrentVec::new())); + txs.push((key_slice, res)); + }; + } +} + +// Wrapper around boxcar::Vec that implements Serialize and Deserialize. 
+#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[derive(Debug)] +pub struct ConcurrentVec { + vec: boxcar::Vec, +} + +impl ConcurrentVec { + fn new() -> Self { + Self { + vec: boxcar::Vec::new(), + } + } + + fn push(&self, item: T) { + self.vec.push(item); + } + + fn iter(&self) -> impl Iterator { + self.vec.iter() + } +} + +impl IntoIterator for ConcurrentVec { + type Item = T; + type IntoIter = boxcar::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + +impl<'a, T> IntoIterator for &'a ConcurrentVec { + type Item = (usize, &'a T); + type IntoIter = boxcar::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.vec.iter() + } +} + +impl Serialize for ConcurrentVec { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let mut seq = serializer.serialize_seq(Some(self.vec.count()))?; + for (_, element) in &self.vec { + seq.serialize_element(element)?; + } + seq.end() + } +} + +impl<'de, T: Deserialize<'de>> Deserialize<'de> for ConcurrentVec { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct ConcurrentVecVisitor(std::marker::PhantomData); + + impl<'de, T: Deserialize<'de>> Visitor<'de> for ConcurrentVecVisitor { + type Value = ConcurrentVec; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a sequence") + } + + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + let vec = boxcar::Vec::with_capacity(seq.size_hint().unwrap_or(0)); + + while let Some(value) = seq.next_element()? { + vec.push(value); + } + + Ok(ConcurrentVec { vec }) + } + } + + deserializer.deserialize_seq(ConcurrentVecVisitor(std::marker::PhantomData)) + } +} + +// Wrapper around dashmap that stores (K, Arc) to minimize shard contention. 
+#[derive(Debug)] +pub struct ReadOptimizedDashMap +where + K: Clone + Eq + std::hash::Hash, + S: Clone + BuildHasher, +{ + inner: DashMap, S>, +} + +impl ReadOptimizedDashMap +where + K: Clone + Eq + std::hash::Hash, + S: Clone + BuildHasher, +{ + fn new(inner: DashMap, S>) -> Self { + Self { inner } + } + + // Alternative to entry(k).or_insert_with(default) that returns an Arc instead of returning a + // guard that holds the underlying shard's write lock. + fn get_or_insert_with(&self, k: &K, default: impl FnOnce() -> V) -> Arc { + match self.inner.get(k) { + Some(v) => Arc::clone(&*v), + None => Arc::clone( + self.inner + .entry(k.clone()) + .or_insert_with(|| Arc::new(default())) + .value(), + ), + } + } + + fn get(&self, k: &K) -> Option> { + self.inner.get(k).map(|v| Arc::clone(&v)) + } + + fn iter(&self) -> impl Iterator, S>> { + self.inner.iter() + } + + fn remove(&self, k: &K) -> Option<(K, Arc)> { + self.inner.remove(k) + } + + fn retain(&self, f: impl FnMut(&K, &mut Arc) -> bool) { + self.inner.retain(f) + } + + #[cfg(feature = "dev-context-only-utils")] + fn clear(&self) { + self.inner.clear(); } } @@ -314,6 +529,47 @@ mod tests { type BankStatusCache = StatusCache<()>; + impl StatusCache { + fn from_slot_deltas(slot_deltas: &[SlotDelta]) -> Self { + let cache = Self::default(); + cache.append(slot_deltas); + cache + } + } + + impl PartialEq for StatusCache { + fn eq(&self, other: &Self) -> bool { + use std::collections::HashSet; + + let roots = self.roots.iter().map(|x| *x).collect::>(); + let other_roots = other.roots.iter().map(|x| *x).collect::>(); + roots == other_roots + && self.cache.iter().all(|item| { + let (hash, value) = item.pair(); + let (max_slot, key_index, hash_map) = &**value; + if let Some(item) = other.cache.get(hash) { + let (other_max_slot, other_key_index, other_hash_map) = &*item; + if max_slot.load(Ordering::Relaxed) + == other_max_slot.load(Ordering::Relaxed) + && key_index == other_key_index + { + return 
hash_map.iter().all(|item| { + let slice = item.key(); + let fork_map = item.value(); + if let Some(other_fork_map) = other_hash_map.get(slice) { + // all this work just to compare the highest forks in the fork map + // per entry + return fork_map.last() == other_fork_map.last(); + } + false + }); + } + } + false + }) + } + } + #[test] fn test_empty_has_no_sigs() { let sig = Signature::default(); @@ -332,7 +588,7 @@ mod tests { #[test] fn test_find_sig_with_ancestor_fork() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = vec![(0, 1)].into_iter().collect(); status_cache.insert(&blockhash, sig, 0, ()); @@ -349,7 +605,7 @@ mod tests { #[test] fn test_find_sig_without_ancestor_fork() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.insert(&blockhash, sig, 1, ()); @@ -360,7 +616,7 @@ mod tests { #[test] fn test_find_sig_with_root_ancestor_fork() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.insert(&blockhash, sig, 0, ()); @@ -374,7 +630,7 @@ mod tests { #[test] fn test_insert_picks_latest_blockhash_fork() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = vec![(0, 0)].into_iter().collect(); status_cache.insert(&blockhash, sig, 0, ()); @@ -390,7 +646,7 @@ mod tests { #[test] fn test_root_expires() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = 
BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.insert(&blockhash, sig, 0, ()); @@ -403,7 +659,7 @@ mod tests { #[test] fn test_clear_signatures_sigs_are_gone() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.insert(&blockhash, sig, 0, ()); @@ -415,7 +671,7 @@ mod tests { #[test] fn test_clear_signatures_insert_works() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let ancestors = Ancestors::default(); status_cache.add_root(0); @@ -429,11 +685,11 @@ mod tests { #[test] fn test_signatures_slice() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); status_cache.clear(); status_cache.insert(&blockhash, sig, 0, ()); - let (_, index, sig_map) = status_cache.cache.get(&blockhash).unwrap(); + let (_, index, sig_map) = &*status_cache.cache.get(&blockhash).unwrap(); let sig_slice: &[u8; CACHED_KEY_SIZE] = arrayref::array_ref![sig.as_ref(), *index, CACHED_KEY_SIZE]; assert!(sig_map.get(sig_slice).is_some()); @@ -442,11 +698,11 @@ mod tests { #[test] fn test_slot_deltas() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); status_cache.clear(); status_cache.insert(&blockhash, sig, 0, ()); - assert!(status_cache.roots().contains(&0)); + assert!(status_cache.roots().collect::>().contains(&0)); let slot_deltas = status_cache.root_slot_deltas(); let cache = StatusCache::from_slot_deltas(&slot_deltas); assert_eq!(cache, 
status_cache); @@ -458,7 +714,7 @@ mod tests { #[test] fn test_roots_deltas() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let blockhash2 = hash(blockhash.as_ref()); status_cache.insert(&blockhash, sig, 0, ()); @@ -467,8 +723,7 @@ mod tests { for i in 0..(MAX_CACHE_ENTRIES + 1) { status_cache.add_root(i as u64); } - assert_eq!(status_cache.slot_deltas.len(), 1); - assert!(status_cache.slot_deltas.contains_key(&1)); + assert!(status_cache.slot_deltas.get(&1).is_some()); let slot_deltas = status_cache.root_slot_deltas(); let cache = StatusCache::from_slot_deltas(&slot_deltas); assert_eq!(cache, status_cache); @@ -483,7 +738,7 @@ mod tests { #[test] fn test_clear_slot_signatures() { let sig = Signature::default(); - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let blockhash = hash(Hash::default().as_ref()); let blockhash2 = hash(blockhash.as_ref()); status_cache.insert(&blockhash, sig, 0, ()); @@ -512,26 +767,26 @@ mod tests { // Check that the slot delta for slot 0 is gone, but slot 1 still // exists - assert!(!status_cache.slot_deltas.contains_key(&0)); - assert!(status_cache.slot_deltas.contains_key(&1)); + assert!(status_cache.slot_deltas.get(&0).is_none()); + assert!(status_cache.slot_deltas.get(&1).is_some()); // Clear slot 1 related data status_cache.clear_slot_entries(1); - assert!(status_cache.slot_deltas.is_empty()); + assert!(status_cache.slot_deltas.get(&0).is_none()); + assert!(status_cache.slot_deltas.get(&1).is_none()); assert!(status_cache .get_status(sig, &blockhash, &ancestors1) .is_none()); assert!(status_cache .get_status(sig, &blockhash2, &ancestors1) .is_none()); - assert!(status_cache.cache.is_empty()); } // Status cache uses a random key offset for each blockhash. Ensure that shorter // keys can still be used if the offset if greater than the key length. 
#[test] fn test_different_sized_keys() { - let mut status_cache = BankStatusCache::default(); + let status_cache = BankStatusCache::default(); let ancestors = vec![(0, 0)].into_iter().collect(); let blockhash = Hash::default(); for _ in 0..100 { diff --git a/sdk/frozen-abi/Cargo.toml b/sdk/frozen-abi/Cargo.toml index 8ac0d0282b3b02..c54aea05600992 100644 --- a/sdk/frozen-abi/Cargo.toml +++ b/sdk/frozen-abi/Cargo.toml @@ -12,6 +12,8 @@ edition = { workspace = true } [dependencies] bs58 = { workspace = true, features = ["alloc"] } bv = { workspace = true, features = ["serde"] } +boxcar = { workspace = true } +dashmap = { workspace = true } log = { workspace = true, features = ["std"] } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } diff --git a/sdk/frozen-abi/src/abi_example.rs b/sdk/frozen-abi/src/abi_example.rs index 63b3c1d68c28d6..7006722c4bd847 100644 --- a/sdk/frozen-abi/src/abi_example.rs +++ b/sdk/frozen-abi/src/abi_example.rs @@ -1,5 +1,6 @@ use { crate::abi_digester::{AbiDigester, DigestError, DigestResult}, + dashmap::DashMap, log::*, serde::Serialize, std::any::type_name, @@ -617,3 +618,28 @@ impl AbiExample for std::sync::OnceLock { Self::from(T::example()) } } + +#[cfg(not(target_os = "solana"))] +impl< + T: std::cmp::Eq + std::hash::Hash + AbiExample, + S: AbiExample, + H: std::hash::BuildHasher + Default + std::clone::Clone, + > AbiExample for DashMap +{ + fn example() -> Self { + info!("AbiExample for (DashMap): {}", type_name::()); + let map = DashMap::default(); + map.insert(T::example(), S::example()); + map + } +} + +#[cfg(not(target_os = "solana"))] +impl AbiExample for boxcar::Vec { + fn example() -> Self { + info!("AbiExample for (boxcar::Vec): {}", type_name::()); + let vec = boxcar::Vec::new(); + vec.push(T::example()); + vec + } +} diff --git a/svm/examples/Cargo.lock b/svm/examples/Cargo.lock index 9a72be80f1c225..04d01faaf730d6 100644 --- a/svm/examples/Cargo.lock +++ b/svm/examples/Cargo.lock @@ 
-712,6 +712,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "boxcar" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f839cdf7e2d3198ac6ca003fd8ebc61715755f41c1cad15ff13df67531e00ed" + [[package]] name = "brotli" version = "7.0.0" @@ -1232,6 +1238,7 @@ dependencies = [ "once_cell", "parking_lot_core 0.9.10", "rayon", + "serde", ] [[package]] @@ -6499,6 +6506,7 @@ dependencies = [ "base64 0.22.1", "bincode", "blake3", + "boxcar", "bv", "bytemuck", "byteorder", @@ -6531,6 +6539,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_with", + "smallvec", "solana-accounts-db", "solana-address-lookup-table-program", "solana-bpf-loader-program",