diff --git a/benchmark/benchmark/logs.py b/benchmark/benchmark/logs.py
index e8830ad4..e30a05ea 100644
--- a/benchmark/benchmark/logs.py
+++ b/benchmark/benchmark/logs.py
@@ -55,7 +55,7 @@ def __init__(self, clients, nodes, faults=0):
 
         # Check whether the nodes timed out.
         # Note that nodes are expected to time out once at the beginning.
-        if self.timeouts > 1:
+        if self.timeouts > 2:
             Print.warn(f'Nodes timed out {self.timeouts:,} time(s)')
 
     def _merge_results(self, input):
@@ -107,6 +107,14 @@ def _parse_nodes(self, log):
 
         configs = {
             'consensus': {
+                'timeout_delay': int(
+                    search(r'Consensus timeout delay .* (\d+)', log).group(1)
+                ),
+                'sync_retry_delay': int(
+                    search(
+                        r'Consensus synchronizer retry delay .* (\d+)', log
+                    ).group(1)
+                ),
                 'max_payload_size': int(
                     search(r'Consensus max payload size .* (\d+)', log).group(1)
                 ),
@@ -115,6 +123,14 @@ def _parse_nodes(self, log):
                 ),
             },
             'mempool': {
+                'queue_capacity': int(
+                    search(r'Mempool queue capacity set to (\d+)', log).group(1)
+                ),
+                'sync_retry_delay': int(
+                    search(
+                        r'Mempool synchronizer retry delay .* (\d+)', log
+                    ).group(1)
+                ),
                 'max_payload_size': int(
                     search(r'Mempool max payload size .* (\d+)', log).group(1)
                 ),
@@ -162,7 +178,7 @@ def _end_to_end_latency(self):
                     assert tx_id in sent  # We receive txs that we sent.
                     start = sent[tx_id]
                     end = self.commits[batch_id]
-                    latency += [end-start]
+                    latency += [end-start]
         return mean(latency) if latency else 0
 
     def result(self):
@@ -171,8 +187,12 @@ def result(self):
         end_to_end_tps, end_to_end_bps, duration = self._end_to_end_throughput()
         end_to_end_latency = self._end_to_end_latency() * 1000
 
+        consensus_timeout_delay = self.configs[0]['consensus']['timeout_delay']
+        consensus_sync_retry_delay = self.configs[0]['consensus']['sync_retry_delay']
         consensus_max_payload_size = self.configs[0]['consensus']['max_payload_size']
         consensus_min_block_delay = self.configs[0]['consensus']['min_block_delay']
+        mempool_queue_capacity = self.configs[0]['mempool']['queue_capacity']
+        mempool_sync_retry_delay = self.configs[0]['mempool']['sync_retry_delay']
         mempool_max_payload_size = self.configs[0]['mempool']['max_payload_size']
         mempool_min_block_delay = self.configs[0]['mempool']['min_block_delay']
 
@@ -188,8 +208,12 @@ def result(self):
             f' Faults: {self.faults} nodes\n'
             f' Execution time: {round(duration):,} s\n'
             '\n'
+            f' Consensus timeout delay: {consensus_timeout_delay:,} ms\n'
+            f' Consensus sync retry delay: {consensus_sync_retry_delay:,} ms\n'
             f' Consensus max payloads size: {consensus_max_payload_size:,} B\n'
             f' Consensus min block delay: {consensus_min_block_delay:,} ms\n'
+            f' Mempool queue capacity: {mempool_queue_capacity:,} B\n'
+            f' Mempool sync retry delay: {mempool_sync_retry_delay:,} ms\n'
             f' Mempool max payloads size: {mempool_max_payload_size:,} B\n'
             f' Mempool min block delay: {mempool_min_block_delay:,} ms\n'
             '\n'
diff --git a/benchmark/benchmark/plot.py b/benchmark/benchmark/plot.py
index 1bff00b2..9c5f45d8 100644
--- a/benchmark/benchmark/plot.py
+++ b/benchmark/benchmark/plot.py
@@ -64,8 +64,8 @@ def _plot(self, x_label, y_label, y_axis, z_axis, type):
                 raise PlotError('Unequal number of x, y, and y_err values')
 
             plt.errorbar(
-                x_values, y_values, yerr=y_err,  # uplims=True, lolims=True,
-                marker=next(markers), label=z_axis(result), linestyle='dotted'
+                x_values, y_values, yerr=y_err, label=z_axis(result),
+                linestyle='dotted', marker=next(markers), capsize=3
            )
 
         plt.legend(loc='lower center', bbox_to_anchor=(0.5, 1), ncol=2)
diff --git a/consensus/src/consensus.rs b/consensus/src/consensus.rs
index 8614d1e0..e50b3de4 100644
--- a/consensus/src/consensus.rs
+++ b/consensus/src/consensus.rs
@@ -30,6 +30,7 @@ impl Consensus {
         tx_consensus_mempool: Sender,
         tx_commit: Sender,
     ) -> ConsensusResult<()> {
+        // NOTE: The following log entries are used to compute performance.
         info!(
             "Consensus timeout delay set to {} ms",
             parameters.timeout_delay
diff --git a/mempool/src/mempool.rs b/mempool/src/mempool.rs
index c30f3e9d..1c0a0b25 100644
--- a/mempool/src/mempool.rs
+++ b/mempool/src/mempool.rs
@@ -27,10 +27,15 @@ impl Mempool {
         consensus_channel: Sender,
         consensus_mempool_channel: Receiver,
     ) -> MempoolResult<()> {
+        // NOTE: The following log entries are used to compute performance.
         info!(
             "Mempool queue capacity set to {} payloads",
             parameters.queue_capacity
         );
+        info!(
+            "Mempool synchronizer retry delay set to {} ms",
+            parameters.sync_retry_delay
+        );
         info!(
             "Mempool max payload size set to {} B",
             parameters.max_payload_size
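
For reference, the sketch below is a minimal, standalone illustration of how the new search patterns in Logs._parse_nodes recover parameters from the info! lines added above. The sample log text and parameter values are assumptions chosen to match the log formats shown in this diff, not output from a real run.

from re import search

# Assumed sample of a node log, following the info! formats in this diff.
log = '\n'.join([
    'Consensus timeout delay set to 5000 ms',
    'Consensus synchronizer retry delay set to 10000 ms',
    'Mempool queue capacity set to 10000 payloads',
    'Mempool synchronizer retry delay set to 10000 ms',
])

# Same patterns as the new 'consensus' and 'mempool' config entries.
timeout_delay = int(search(r'Consensus timeout delay .* (\d+)', log).group(1))
consensus_retry = int(
    search(r'Consensus synchronizer retry delay .* (\d+)', log).group(1)
)
queue_capacity = int(search(r'Mempool queue capacity set to (\d+)', log).group(1))
mempool_retry = int(
    search(r'Mempool synchronizer retry delay .* (\d+)', log).group(1)
)

assert (timeout_delay, consensus_retry) == (5000, 10000)
assert (queue_capacity, mempool_retry) == (10000, 10000)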