From 61449418b24abdd089920b8d19fd182c8febc20e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Tue, 28 May 2024 18:41:56 +0100 Subject: [PATCH 1/2] Fix ambiguous reproposal bug (BFT-454) (#120) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Now if there are >1 subquorums in the PrepareQC, leader will create a new proposal and replicas will require a new proposal. ## Why ❔ It is possible to have 2 subquorums: vote A and vote B, each with >2f weight, in a single PrepareQC (even in the unweighted case, because QC contains n-f signatures, not 4f+1). In such a situation from the POV of the BFT algorithm, leader is eligible to do any of the following: - reproposal for A - reproposal for B - a new proposal Since the choice here is ambiguous, this can break liveness. --- .../src/validator/messages/leader_prepare.rs | 13 +++- node/libs/roles/src/validator/tests.rs | 59 +++++++++++++++++++ 2 files changed, 70 insertions(+), 2 deletions(-) diff --git a/node/libs/roles/src/validator/messages/leader_prepare.rs b/node/libs/roles/src/validator/messages/leader_prepare.rs index f452a594..b5b9825f 100644 --- a/node/libs/roles/src/validator/messages/leader_prepare.rs +++ b/node/libs/roles/src/validator/messages/leader_prepare.rs @@ -75,6 +75,9 @@ impl PrepareQC { /// Get the highest block voted and check if there's a quorum of votes for it. To have a quorum /// in this situation, we require 2*f+1 votes, where f is the maximum number of faulty replicas. + /// Note that it is possible to have 2 quorums: vote A and vote B, each with >2f weight, in a single + /// PrepareQC (even in the unweighted case, because QC contains n-f signatures, not 4f+1). In such a + /// situation we say that there is no high vote. 
pub fn high_vote(&self, genesis: &Genesis) -> Option { let mut count: HashMap<_, u64> = HashMap::new(); for (msg, signers) in &self.map { @@ -82,9 +85,15 @@ impl PrepareQC { *count.entry(v.proposal).or_default() += genesis.validators.weight(signers); } } - // We only take one value from the iterator because there can only be at most one block with a quorum of 2f+1 votes. + let min = 2 * genesis.validators.max_faulty_weight() + 1; - count.into_iter().find(|x| x.1 >= min).map(|x| x.0) + let mut high_votes: Vec<_> = count.into_iter().filter(|x| x.1 >= min).collect(); + + if high_votes.len() == 1 { + high_votes.pop().map(|x| x.0) + } else { + None + } } /// Get the highest CommitQC. diff --git a/node/libs/roles/src/validator/tests.rs b/node/libs/roles/src/validator/tests.rs index 2f0136f0..ec6aa1e1 100644 --- a/node/libs/roles/src/validator/tests.rs +++ b/node/libs/roles/src/validator/tests.rs @@ -346,6 +346,65 @@ fn test_prepare_qc() { } } +#[test] +fn test_prepare_qc_high_vote() { + let ctx = ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + // This will create equally weighted validators + let setup = Setup::new(rng, 6); + + let view_num: ViewNumber = rng.gen(); + let msg_a = make_replica_prepare(rng, view_num, &setup); + let msg_b = make_replica_prepare(rng, view_num, &setup); + let msg_c = make_replica_prepare(rng, view_num, &setup); + + // Case with 1 subquorum. + let mut qc = PrepareQC::new(msg_a.view.clone()); + + for key in &setup.validator_keys { + qc.add(&key.sign_msg(msg_a.clone()), &setup.genesis) + .unwrap(); + } + + assert!(qc.high_vote(&setup.genesis).is_some()); + + // Case with 2 subquorums. 
+ let mut qc = PrepareQC::new(msg_a.view.clone()); + + for key in &setup.validator_keys[0..3] { + qc.add(&key.sign_msg(msg_a.clone()), &setup.genesis) + .unwrap(); + } + + for key in &setup.validator_keys[3..6] { + qc.add(&key.sign_msg(msg_b.clone()), &setup.genesis) + .unwrap(); + } + + assert!(qc.high_vote(&setup.genesis).is_none()); + + // Case with no subquorums. + let mut qc = PrepareQC::new(msg_a.view.clone()); + + for key in &setup.validator_keys[0..2] { + qc.add(&key.sign_msg(msg_a.clone()), &setup.genesis) + .unwrap(); + } + + for key in &setup.validator_keys[2..4] { + qc.add(&key.sign_msg(msg_b.clone()), &setup.genesis) + .unwrap(); + } + + for key in &setup.validator_keys[4..6] { + qc.add(&key.sign_msg(msg_c.clone()), &setup.genesis) + .unwrap(); + } + + assert!(qc.high_vote(&setup.genesis).is_none()); +} + #[test] fn test_prepare_qc_add_errors() { use PrepareQCAddError as Error; From 410636c67936b96bd8b5b2d821348e31bed31ed0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Wed, 29 May 2024 14:13:49 +0100 Subject: [PATCH 2/2] Make validators broadcast the high Commit QC (BFT-458) (#119) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ We now broadcast the Replica Prepare message to all replicas. Replicas then use it to update their high Commit QCs and change views if necessary. 
## Why ❔ The high Commit QC broadcast should be independent from the rest of the algorithm, so that view synchronization is independent from the bft logic - it simplifies reasoning a lot and prevents potential deadlocks that our implementation currently has --- node/actors/bft/src/leader/replica_commit.rs | 3 +- node/actors/bft/src/leader/replica_prepare.rs | 3 +- node/actors/bft/src/leader/state_machine.rs | 16 ++- node/actors/bft/src/lib.rs | 17 ++- node/actors/bft/src/replica/block.rs | 2 + node/actors/bft/src/replica/mod.rs | 1 + node/actors/bft/src/replica/new_view.rs | 4 +- .../actors/bft/src/replica/replica_prepare.rs | 105 ++++++++++++++++++ node/actors/bft/src/replica/state_machine.rs | 54 ++++++++- node/actors/network/src/gossip/mod.rs | 3 +- .../network/src/rpc/push_batch_votes.rs | 3 +- node/libs/roles/src/attester/conv.rs | 9 +- .../src/attester/keys/aggregate_signature.rs | 3 +- .../libs/roles/src/attester/keys/signature.rs | 3 +- .../libs/roles/src/attester/messages/batch.rs | 3 +- node/libs/roles/src/attester/messages/msg.rs | 3 +- node/libs/roles/src/attester/mod.rs | 3 +- node/libs/roles/src/attester/tests.rs | 3 +- node/libs/roles/src/validator/conv.rs | 12 +- .../roles/src/validator/messages/tests.rs | 6 +- node/libs/roles/src/validator/testonly.rs | 4 +- 21 files changed, 210 insertions(+), 50 deletions(-) create mode 100644 node/actors/bft/src/replica/replica_prepare.rs diff --git a/node/actors/bft/src/leader/replica_commit.rs b/node/actors/bft/src/leader/replica_commit.rs index e44d7a5d..741ef5be 100644 --- a/node/actors/bft/src/leader/replica_commit.rs +++ b/node/actors/bft/src/leader/replica_commit.rs @@ -1,9 +1,8 @@ //! Handler of a ReplicaCommit message. 
-use std::collections::HashSet; - use super::StateMachine; use crate::metrics; +use std::collections::HashSet; use tracing::instrument; use zksync_concurrency::{ctx, metrics::LatencyHistogramExt as _}; use zksync_consensus_network::io::{ConsensusInputMessage, Target}; diff --git a/node/actors/bft/src/leader/replica_prepare.rs b/node/actors/bft/src/leader/replica_prepare.rs index a9ab65a5..eb8350ea 100644 --- a/node/actors/bft/src/leader/replica_prepare.rs +++ b/node/actors/bft/src/leader/replica_prepare.rs @@ -1,7 +1,6 @@ //! Handler of a ReplicaPrepare message. -use std::collections::HashSet; - use super::StateMachine; +use std::collections::HashSet; use tracing::instrument; use zksync_concurrency::{ctx, error::Wrap}; use zksync_consensus_roles::validator; diff --git a/node/actors/bft/src/leader/state_machine.rs b/node/actors/bft/src/leader/state_machine.rs index 253cea9a..6f26e866 100644 --- a/node/actors/bft/src/leader/state_machine.rs +++ b/node/actors/bft/src/leader/state_machine.rs @@ -93,11 +93,19 @@ impl StateMachine { .wrap("process_replica_prepare()") { Ok(()) => Ok(()), - Err(super::replica_prepare::Error::Internal(err)) => { - return Err(err); - } Err(err) => { - tracing::warn!("process_replica_prepare: {err:#}"); + match err { + super::replica_prepare::Error::Internal(e) => { + return Err(e); + } + super::replica_prepare::Error::Old { .. 
} + | super::replica_prepare::Error::NotLeaderInView => { + tracing::info!("process_replica_prepare: {err:#}"); + } + _ => { + tracing::warn!("process_replica_prepare: {err:#}"); + } + } Err(()) } }; diff --git a/node/actors/bft/src/lib.rs b/node/actors/bft/src/lib.rs index ddce385f..d3b2bc40 100644 --- a/node/actors/bft/src/lib.rs +++ b/node/actors/bft/src/lib.rs @@ -19,7 +19,8 @@ use crate::io::{InputMessage, OutputMessage}; use anyhow::Context; pub use config::Config; use std::sync::Arc; -use zksync_concurrency::{ctx, scope}; +use zksync_concurrency::{ctx, oneshot, scope}; +use zksync_consensus_network::io::ConsensusReq; use zksync_consensus_roles::validator; use zksync_consensus_utils::pipe::ActorPipe; @@ -93,7 +94,19 @@ impl Config { let InputMessage::Network(req) = pipe.recv.recv(ctx).await?; use validator::ConsensusMsg as M; match &req.msg.msg { - M::ReplicaPrepare(_) | M::ReplicaCommit(_) => leader_send.send(req), + M::ReplicaPrepare(_) => { + // This is a hacky way to do a clone. This is necessary since we don't want to derive + // Clone for ConsensusReq. When we change to ChonkyBFT this will be removed anyway. + let (ack, _) = oneshot::channel(); + let new_req = ConsensusReq { + msg: req.msg.clone(), + ack, + }; + + replica_send.send(new_req); + leader_send.send(req); + } + M::ReplicaCommit(_) => leader_send.send(req), M::LeaderPrepare(_) | M::LeaderCommit(_) => replica_send.send(req), } } diff --git a/node/actors/bft/src/replica/block.rs b/node/actors/bft/src/replica/block.rs index 7834b5e7..adea3d78 100644 --- a/node/actors/bft/src/replica/block.rs +++ b/node/actors/bft/src/replica/block.rs @@ -6,6 +6,8 @@ impl StateMachine { /// Tries to build a finalized block from the given CommitQC. We simply search our /// block proposal cache for the matching block, and if we find it we build the block. /// If this method succeeds, it sends the finalized block to the executor. 
+ /// It also updates the High QC in the replica state machine, if the received QC is + /// higher. #[tracing::instrument(level = "debug", skip_all)] pub(crate) async fn save_block( &mut self, diff --git a/node/actors/bft/src/replica/mod.rs b/node/actors/bft/src/replica/mod.rs index 05a72481..640f044b 100644 --- a/node/actors/bft/src/replica/mod.rs +++ b/node/actors/bft/src/replica/mod.rs @@ -6,6 +6,7 @@ mod block; pub(crate) mod leader_commit; pub(crate) mod leader_prepare; mod new_view; +pub(crate) mod replica_prepare; mod state_machine; #[cfg(test)] mod tests; diff --git a/node/actors/bft/src/replica/new_view.rs b/node/actors/bft/src/replica/new_view.rs index 03996fc2..ee7b5993 100644 --- a/node/actors/bft/src/replica/new_view.rs +++ b/node/actors/bft/src/replica/new_view.rs @@ -24,7 +24,7 @@ impl StateMachine { // Backup our state. self.backup_state(ctx).await.wrap("backup_state()")?; - // Send the replica message to the next leader. + // Send the replica message. let output_message = ConsensusInputMessage { message: self .config @@ -39,7 +39,7 @@ impl StateMachine { high_qc: self.high_qc.clone(), }, )), - recipient: Target::Validator(self.config.genesis().view_leader(self.view)), + recipient: Target::Broadcast, }; self.outbound_pipe.send(output_message.into()); diff --git a/node/actors/bft/src/replica/replica_prepare.rs b/node/actors/bft/src/replica/replica_prepare.rs new file mode 100644 index 00000000..34543170 --- /dev/null +++ b/node/actors/bft/src/replica/replica_prepare.rs @@ -0,0 +1,105 @@ +//! Handler of a ReplicaPrepare message. +use super::StateMachine; +use tracing::instrument; +use zksync_concurrency::{ctx, error::Wrap}; +use zksync_consensus_roles::validator; + +/// Errors that can occur when processing a "replica prepare" message. +#[derive(Debug, thiserror::Error)] +pub(crate) enum Error { + /// Message signer isn't part of the validator set. 
+ #[error("Message signer isn't part of the validator set (signer: {signer:?})")] + NonValidatorSigner { + /// Signer of the message. + signer: validator::PublicKey, + }, + /// Past view or phase. + #[error("past view/phase (current view: {current_view:?}, current phase: {current_phase:?})")] + Old { + /// Current view. + current_view: validator::ViewNumber, + /// Current phase. + current_phase: validator::Phase, + }, + /// Invalid message signature. + #[error("invalid signature: {0:#}")] + InvalidSignature(#[source] anyhow::Error), + /// Invalid message. + #[error(transparent)] + InvalidMessage(validator::ReplicaPrepareVerifyError), + /// Internal error. Unlike other error types, this one isn't supposed to be easily recoverable. + #[error(transparent)] + Internal(#[from] ctx::Error), +} + +impl Wrap for Error { + fn with_wrap C>( + self, + f: F, + ) -> Self { + match self { + Error::Internal(err) => Error::Internal(err.with_wrap(f)), + err => err, + } + } +} + +impl StateMachine { + #[instrument(level = "trace", skip(self), ret)] + pub(crate) async fn process_replica_prepare( + &mut self, + ctx: &ctx::Ctx, + signed_message: validator::Signed, + ) -> Result<(), Error> { + // ----------- Checking origin of the message -------------- + + // Unwrap message. + let message = signed_message.msg.clone(); + let author = &signed_message.key; + + // Check that the message signer is in the validator set. + if !self.config.genesis().validators.contains(author) { + return Err(Error::NonValidatorSigner { + signer: author.clone(), + }); + } + + // We only accept this type of message from the future. + if message.view.number <= self.view { + return Err(Error::Old { + current_view: self.view, + current_phase: self.phase, + }); + } + + // ----------- Checking the signed part of the message -------------- + + // Check the signature on the message. + signed_message.verify().map_err(Error::InvalidSignature)?; + + // Extract the QC and verify it. 
+ let Some(high_qc) = message.high_qc else { + return Ok(()); + }; + + high_qc.verify(self.config.genesis()).map_err(|err| { + Error::InvalidMessage(validator::ReplicaPrepareVerifyError::HighQC(err)) + })?; + + // ----------- All checks finished. Now we process the message. -------------- + + let qc_view = high_qc.view().number; + + // Try to create a finalized block with this CommitQC and our block proposal cache. + // It will also update our high QC, if necessary. + self.save_block(ctx, &high_qc).await.wrap("save_block()")?; + + // Skip to a new view, if necessary. + if qc_view >= self.view { + self.view = qc_view; + self.start_new_view(ctx).await.wrap("start_new_view()")?; + } + + Ok(()) + } +} diff --git a/node/actors/bft/src/replica/state_machine.rs b/node/actors/bft/src/replica/state_machine.rs index e5493fd4..b00aedd0 100644 --- a/node/actors/bft/src/replica/state_machine.rs +++ b/node/actors/bft/src/replica/state_machine.rs @@ -106,18 +106,51 @@ impl StateMachine { let now = ctx.now(); let label = match &req.msg.msg { + ConsensusMsg::ReplicaPrepare(_) => { + let res = match self + .process_replica_prepare(ctx, req.msg.cast().unwrap()) + .await + .wrap("process_replica_prepare()") + { + Ok(()) => Ok(()), + Err(err) => { + match err { + super::replica_prepare::Error::Internal(e) => { + return Err(e); + } + super::replica_prepare::Error::Old { .. 
} => { + tracing::info!("process_replica_prepare: {err:#}"); + } + _ => { + tracing::warn!("process_replica_prepare: {err:#}"); + } + } + Err(()) + } + }; + metrics::ConsensusMsgLabel::ReplicaPrepare.with_result(&res) + } ConsensusMsg::LeaderPrepare(_) => { let res = match self .process_leader_prepare(ctx, req.msg.cast().unwrap()) .await .wrap("process_leader_prepare()") { - Err(super::leader_prepare::Error::Internal(err)) => return Err(err), + Ok(()) => Ok(()), Err(err) => { - tracing::warn!("process_leader_prepare(): {err:#}"); + match err { + super::leader_prepare::Error::Internal(e) => { + return Err(e); + } + super::leader_prepare::Error::Old { .. } => { + tracing::info!("process_leader_prepare: {err:#}"); + } + _ => { + tracing::warn!("process_leader_prepare: {err:#}"); + } + } Err(()) } - Ok(()) => Ok(()), }; metrics::ConsensusMsgLabel::LeaderPrepare.with_result(&res) } @@ -127,12 +160,21 @@ impl StateMachine { .await .wrap("process_leader_commit()") { - Err(super::leader_commit::Error::Internal(err)) => return Err(err), + Ok(()) => Ok(()), Err(err) => { - tracing::warn!("process_leader_commit(): {err:#}"); + match err { + super::leader_commit::Error::Internal(e) => { + return Err(e); + } + super::leader_commit::Error::Old { .. } => { + tracing::info!("process_leader_commit: {err:#}"); + } + _ => { + tracing::warn!("process_leader_commit: {err:#}"); + } + } Err(()) } - Ok(()) => Ok(()), }; metrics::ConsensusMsgLabel::LeaderCommit.with_result(&res) } diff --git a/node/actors/network/src/gossip/mod.rs b/node/actors/network/src/gossip/mod.rs index 7242f4a0..7091c643 100644 --- a/node/actors/network/src/gossip/mod.rs +++ b/node/actors/network/src/gossip/mod.rs @@ -12,6 +12,7 @@ //! Static connections constitute a rigid "backbone" of the gossip network, which is insensitive to //! eclipse attack. Dynamic connections are supposed to improve the properties of the gossip //! network graph (minimize its diameter, increase connectedness). 
+use self::batch_votes::BatchVotesWatch; use crate::{gossip::ValidatorAddrsWatch, io, pool::PoolWatch, Config}; use anyhow::Context as _; use im::HashMap; @@ -21,8 +22,6 @@ use zksync_concurrency::{ctx, ctx::channel, scope, sync}; use zksync_consensus_roles::{attester, node, validator}; use zksync_consensus_storage::BlockStore; -use self::batch_votes::BatchVotesWatch; - mod batch_votes; mod fetch; mod handshake; diff --git a/node/actors/network/src/rpc/push_batch_votes.rs b/node/actors/network/src/rpc/push_batch_votes.rs index 35c3762c..51ab7642 100644 --- a/node/actors/network/src/rpc/push_batch_votes.rs +++ b/node/actors/network/src/rpc/push_batch_votes.rs @@ -1,8 +1,7 @@ //! Defines RPC for passing consensus messages. -use std::sync::Arc; - use crate::{mux, proto::gossip as proto}; use anyhow::Context as _; +use std::sync::Arc; use zksync_consensus_roles::attester::{self, Batch}; use zksync_protobuf::ProtoFmt; diff --git a/node/libs/roles/src/attester/conv.rs b/node/libs/roles/src/attester/conv.rs index b636fcc9..85a32e83 100644 --- a/node/libs/roles/src/attester/conv.rs +++ b/node/libs/roles/src/attester/conv.rs @@ -1,14 +1,13 @@ +use super::{ + AggregateSignature, Batch, BatchNumber, BatchQC, Msg, MsgHash, PublicKey, Signature, Signed, + Signers, WeightedAttester, +}; use crate::proto::attester::{self as proto}; use anyhow::Context as _; use zksync_consensus_crypto::ByteFmt; use zksync_consensus_utils::enum_util::Variant; use zksync_protobuf::{read_required, required, ProtoFmt}; -use super::{ - AggregateSignature, Batch, BatchNumber, BatchQC, Msg, MsgHash, PublicKey, Signature, Signed, - Signers, WeightedAttester, -}; - impl ProtoFmt for Batch { type Proto = proto::Batch; fn read(r: &Self::Proto) -> anyhow::Result { diff --git a/node/libs/roles/src/attester/keys/aggregate_signature.rs b/node/libs/roles/src/attester/keys/aggregate_signature.rs index 95ab6e0e..5143c4a3 100644 --- a/node/libs/roles/src/attester/keys/aggregate_signature.rs +++ 
b/node/libs/roles/src/attester/keys/aggregate_signature.rs @@ -1,6 +1,5 @@ -use crate::attester::{Batch, MsgHash}; - use super::{PublicKey, Signature}; +use crate::attester::{Batch, MsgHash}; use std::fmt; use zksync_consensus_crypto::{bn254, ByteFmt, Text, TextFmt}; use zksync_consensus_utils::enum_util::Variant; diff --git a/node/libs/roles/src/attester/keys/signature.rs b/node/libs/roles/src/attester/keys/signature.rs index 22583ddf..e99b6486 100644 --- a/node/libs/roles/src/attester/keys/signature.rs +++ b/node/libs/roles/src/attester/keys/signature.rs @@ -1,6 +1,5 @@ -use crate::attester::{Msg, MsgHash}; - use super::PublicKey; +use crate::attester::{Msg, MsgHash}; use std::fmt; use zksync_consensus_crypto::{bn254, ByteFmt, Text, TextFmt}; diff --git a/node/libs/roles/src/attester/messages/batch.rs b/node/libs/roles/src/attester/messages/batch.rs index b6af94d6..048f37ec 100644 --- a/node/libs/roles/src/attester/messages/batch.rs +++ b/node/libs/roles/src/attester/messages/batch.rs @@ -1,6 +1,5 @@ -use crate::{attester, validator::Genesis}; - use super::{Signed, Signers}; +use crate::{attester, validator::Genesis}; use anyhow::{ensure, Context as _}; #[derive(Clone, Debug, PartialEq, Eq, Hash, Default, PartialOrd)] diff --git a/node/libs/roles/src/attester/messages/msg.rs b/node/libs/roles/src/attester/messages/msg.rs index 5501cae9..cc52b347 100644 --- a/node/libs/roles/src/attester/messages/msg.rs +++ b/node/libs/roles/src/attester/messages/msg.rs @@ -1,8 +1,7 @@ -use std::{collections::BTreeMap, fmt}; - use crate::{attester, validator}; use anyhow::Context as _; use bit_vec::BitVec; +use std::{collections::BTreeMap, fmt}; use zksync_consensus_crypto::{keccak256, ByteFmt, Text, TextFmt}; use zksync_consensus_utils::enum_util::{BadVariantError, Variant}; diff --git a/node/libs/roles/src/attester/mod.rs b/node/libs/roles/src/attester/mod.rs index 5bc5b466..8dae1b83 100644 --- a/node/libs/roles/src/attester/mod.rs +++ b/node/libs/roles/src/attester/mod.rs @@ 
-8,5 +8,4 @@ mod keys; mod messages; mod testonly; -pub use self::keys::*; -pub use self::messages::*; +pub use self::{keys::*, messages::*}; diff --git a/node/libs/roles/src/attester/tests.rs b/node/libs/roles/src/attester/tests.rs index f29ed464..913790ad 100644 --- a/node/libs/roles/src/attester/tests.rs +++ b/node/libs/roles/src/attester/tests.rs @@ -1,6 +1,5 @@ -use crate::validator::testonly::Setup; - use super::*; +use crate::validator::testonly::Setup; use assert_matches::assert_matches; use rand::Rng; use zksync_concurrency::ctx; diff --git a/node/libs/roles/src/validator/conv.rs b/node/libs/roles/src/validator/conv.rs index e7f6964f..8d6703b0 100644 --- a/node/libs/roles/src/validator/conv.rs +++ b/node/libs/roles/src/validator/conv.rs @@ -1,15 +1,15 @@ -use crate::{ - attester::{self, WeightedAttester}, - node::SessionId, -}; - use super::{ AggregateSignature, BlockHeader, BlockNumber, ChainId, CommitQC, Committee, ConsensusMsg, FinalBlock, ForkNumber, Genesis, GenesisHash, GenesisRaw, LeaderCommit, LeaderPrepare, Msg, MsgHash, NetAddress, Payload, PayloadHash, Phase, PrepareQC, ProtocolVersion, PublicKey, ReplicaCommit, ReplicaPrepare, Signature, Signed, Signers, View, ViewNumber, WeightedValidator, }; -use crate::{proto::validator as proto, validator::LeaderSelectionMode}; +use crate::{ + attester::{self, WeightedAttester}, + node::SessionId, + proto::validator as proto, + validator::LeaderSelectionMode, +}; use anyhow::Context as _; use std::collections::BTreeMap; use zksync_consensus_crypto::ByteFmt; diff --git a/node/libs/roles/src/validator/messages/tests.rs b/node/libs/roles/src/validator/messages/tests.rs index c570177f..fe1812d7 100644 --- a/node/libs/roles/src/validator/messages/tests.rs +++ b/node/libs/roles/src/validator/messages/tests.rs @@ -1,5 +1,7 @@ -use crate::attester::{self, WeightedAttester}; -use crate::validator::*; +use crate::{ + attester::{self, WeightedAttester}, + validator::*, +}; use anyhow::Context as _; use 
rand::{prelude::StdRng, Rng, SeedableRng}; use zksync_concurrency::ctx; diff --git a/node/libs/roles/src/validator/testonly.rs b/node/libs/roles/src/validator/testonly.rs index fc400fef..6a01e35d 100644 --- a/node/libs/roles/src/validator/testonly.rs +++ b/node/libs/roles/src/validator/testonly.rs @@ -1,6 +1,4 @@ //! Test-only utilities. -use crate::attester; - use super::{ AggregateSignature, BlockHeader, BlockNumber, ChainId, CommitQC, Committee, ConsensusMsg, FinalBlock, ForkNumber, Genesis, GenesisHash, GenesisRaw, LeaderCommit, LeaderPrepare, Msg, @@ -8,7 +6,7 @@ use super::{ ReplicaCommit, ReplicaPrepare, SecretKey, Signature, Signed, Signers, View, ViewNumber, WeightedValidator, }; -use crate::validator::LeaderSelectionMode; +use crate::{attester, validator::LeaderSelectionMode}; use bit_vec::BitVec; use rand::{ distributions::{Distribution, Standard},