Skip to content

Commit

Permalink
BFT-465: Skeleton for Twins tests
Browse files Browse the repository at this point in the history
  • Loading branch information
aakoshh committed May 24, 2024
1 parent 4f00504 commit ec75578
Show file tree
Hide file tree
Showing 3 changed files with 162 additions and 9 deletions.
19 changes: 15 additions & 4 deletions node/actors/bft/src/testonly/run.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
use super::{Behavior, Node};
use network::Config;
use std::collections::HashMap;
use tracing::Instrument as _;
use zksync_concurrency::{ctx, oneshot, scope};
use zksync_consensus_network as network;
use zksync_consensus_roles::validator;
use zksync_consensus_roles::validator::{self, Genesis};
use zksync_consensus_storage::testonly::new_store;
use zksync_consensus_utils::pipe;

Expand All @@ -22,19 +23,29 @@ pub(crate) struct Test {
}

impl Test {
/// Run a test with the given parameters and a random network setup.
pub(crate) async fn run(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> {
    let rng = &mut ctx.rng();
    // One weight entry per configured node; behaviors are handled later by the harness.
    let weights = self.nodes.iter().map(|(_, weight)| *weight).collect();
    let setup = validator::testonly::Setup::new_with_weights(rng, weights);
    // Random network configs with a gossip fan-out of 1.
    let nets = network::testonly::new_configs(rng, &setup, 1);
    self.run_with_config(ctx, nets, &setup.genesis).await
}

/// Run a test with the given parameters and network configuration.
pub(crate) async fn run_with_config(
&self,
ctx: &ctx::Ctx,
nets: Vec<Config>,
genesis: &Genesis,
) -> anyhow::Result<()> {
let mut nodes = vec![];
let mut honest = vec![];
scope::run!(ctx, |ctx, s| async {
for (i, net) in nets.into_iter().enumerate() {
let (store, runner) = new_store(ctx, &setup.genesis).await;
let (store, runner) = new_store(ctx, genesis).await;
s.spawn_bg(runner.run(ctx));
if self.nodes[i].0 == Behavior::Honest {
honest.push(store.clone());
Expand All @@ -50,7 +61,7 @@ impl Test {

// Run the nodes until all honest nodes store enough finalized blocks.
assert!(self.blocks_to_finalize > 0);
let first = setup.genesis.first_block;
let first = genesis.first_block;
let last = first + (self.blocks_to_finalize as u64 - 1);
for store in &honest {
store.wait_until_queued(ctx, last).await?;
Expand Down
131 changes: 128 additions & 3 deletions node/actors/bft/src/tests.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,19 @@
use crate::testonly::{ut_harness::UTHarness, Behavior, Network, Test};
use zksync_concurrency::{ctx, scope, time};
use zksync_consensus_roles::validator;
use crate::testonly::{
twins::{Cluster, HasKey, ScenarioGenerator, Twin},
ut_harness::UTHarness,
Behavior, Network, Test,
};
use rand::Rng;
use zksync_concurrency::{
ctx::{self, Ctx},
scope, time,
};
use zksync_consensus_network::testonly::new_configs_for_validators;
use zksync_consensus_roles::validator::{
self,
testonly::{Setup, SetupSpec},
LeaderSelectionMode, PublicKey,
};

async fn run_test(behavior: Behavior, network: Network) {
let _guard = zksync_concurrency::testonly::set_timeout(time::Duration::seconds(30));
Expand Down Expand Up @@ -195,3 +208,115 @@ async fn non_proposing_leader() {
.await
.unwrap()
}

/// Run Twins scenarios without actual twins, so just random partitions and leaders,
/// to see that the basic mechanics of the network allow finalizations to happen.
#[tokio::test(flavor = "multi_thread")]
async fn honest_no_twins_network() {
    let ctx = &ctx::test_root(&ctx::RealClock);
    let rng = &mut ctx.rng();

    for _ in 0..5 {
        // Anywhere from a single replica up to 11 (enough for a few faulty slots).
        let num_replicas = rng.gen_range(1..=11);
        // BUG FIX: `run_twins` is async — without `.await` the future was
        // dropped unpolled and the scenario never actually ran.
        run_twins(ctx, num_replicas, false).await;
    }
}

/// Run a number of Twins scenarios on a cluster of `num_replicas` nodes.
///
/// With `use_twins == false` no replica keys are duplicated, so the test only
/// exercises random partition schedules and leader rotations; with
/// `use_twins == true` up to `f` replicas get a twin sharing their key.
async fn run_twins(ctx: &Ctx, num_replicas: usize, use_twins: bool) {
    // A replica as seen by the scenario generator: a signed index plus the
    // validator public key it (and possibly its twin) uses.
    #[derive(PartialEq)]
    struct Replica {
        id: i64,
        public_key: PublicKey,
    }

    impl HasKey for Replica {
        type Key = PublicKey;

        fn key(&self) -> &Self::Key {
            &self.public_key
        }
    }

    impl Twin for Replica {
        fn to_twin(&self) -> Self {
            Self {
                // Negate the id so a twin is distinguishable from its
                // original while still mapping back to the same key.
                id: -self.id,
                public_key: self.public_key.clone(),
            }
        }
    }

    let _guard = zksync_concurrency::testonly::set_timeout(time::Duration::seconds(30));
    zksync_concurrency::testonly::abort_on_panic();
    let rng = &mut ctx.rng();

    // The existing test machinery uses the number of finalized blocks as an exit criteria.
    let blocks_to_finalize = 5;
    // The test is going to disrupt the communication by partitioning nodes,
    // where the leader might not be in a partition with enough replicas to
    // form a quorum, therefore to allow N blocks to be finalized we need to
    // go longer.
    let num_rounds = blocks_to_finalize * 5;
    // The paper considers 2 or 3 partitions enough.
    let max_partitions = 3;

    // Everyone on the twins network is honest.
    // For now assign one power each (not, say, 100 each, or varying weights).
    let nodes = vec![(Behavior::Honest, 1u64); num_replicas];
    let num_honest = validator::threshold(num_replicas as u64) as usize;
    let num_faulty = num_replicas - num_honest;
    let num_twins = if use_twins && num_faulty > 0 {
        rng.gen_range(1..=num_faulty)
    } else {
        0
    };

    let mut spec = SetupSpec::new_with_weights(rng, nodes.iter().map(|(_, w)| *w).collect());

    let replicas = spec
        .validator_weights
        .iter()
        .enumerate()
        .map(|(i, (sk, _))| Replica {
            id: i as i64,
            public_key: sk.public(),
        })
        .collect::<Vec<_>>();

    let cluster = Cluster::new(replicas, num_twins);
    let scenarios = ScenarioGenerator::new(&cluster, num_rounds, max_partitions);

    // Reuse the same cluster to run a few scenarios.
    for _ in 0..10 {
        // Generate a permutation of partitions and leaders for the given number of rounds.
        let scenario = scenarios.generate_one(rng);

        // Assign the leadership schedule to the consensus.
        spec.leader_selection =
            LeaderSelectionMode::Rota(scenario.rounds.iter().map(|rc| rc.leader.clone()).collect());

        // Generate a new setup with this leadership schedule.
        let setup = Setup::from(spec.clone());

        // Create network config for honest nodes, and then extras for the twins,
        // which reuse the first `num_twins` validator keys.
        let validator_keys = setup
            .validator_keys
            .iter()
            .chain(setup.validator_keys.iter().take(num_twins));

        let nets = new_configs_for_validators(rng, validator_keys, 1);

        // TODO: Create a network mode that supports partition schedule,
        // which requires identifying the sender network (not validator) identity.
        // BUG FIX: the original `let network = todo!()` was missing its
        // terminating semicolon, which did not parse.
        let network = todo!();

        Test {
            network,
            // BUG FIX: `nodes` must be cloned — moving it into the struct
            // literal inside the loop is a use-after-move on iteration two.
            nodes: nodes.clone(),
            blocks_to_finalize,
        }
        .run_with_config(ctx, nets, &setup.genesis)
        .await
        .unwrap()
    }
}
21 changes: 19 additions & 2 deletions node/actors/network/src/testonly.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,10 @@ use std::{
sync::Arc,
};
use zksync_concurrency::{ctx, ctx::channel, io, limiter, net, scope, sync};
use zksync_consensus_roles::{node, validator};
use zksync_consensus_roles::{
node,
validator::{self, SecretKey},
};
use zksync_consensus_storage::BlockStore;
use zksync_consensus_utils::pipe;

Expand Down Expand Up @@ -77,7 +80,21 @@ pub fn new_configs(
setup: &validator::testonly::Setup,
gossip_peers: usize,
) -> Vec<Config> {
let configs = setup.validator_keys.iter().map(|validator_key| {
new_configs_for_validators(rng, setup.validator_keys.iter(), gossip_peers)
}

/// Construct configs for `n` validators of the consensus.
///
/// This version allows for repeating keys used in Twins tests.
pub fn new_configs_for_validators<'a, I>(
rng: &mut impl Rng,
validator_keys: I,
gossip_peers: usize,
) -> Vec<Config>
where
I: Iterator<Item = &'a SecretKey>,
{
let configs = validator_keys.map(|validator_key| {
let addr = net::tcp::testonly::reserve_listener();
Config {
server_addr: addr,
Expand Down

0 comments on commit ec75578

Please sign in to comment.