feat(eigen-client-extra-features): fix clippy and add doc #330

Merged · 2 commits · Nov 8, 2024
Changes from all commits
14 changes: 14 additions & 0 deletions core/lib/config/src/configs/da_client/eigen.rs
@@ -10,6 +10,7 @@ pub enum EigenConfig {
Disperser(DisperserConfig),
}

/// Configuration for the EigenDA in-memory client.
#[derive(Clone, Debug, PartialEq, Deserialize, Default)]
pub struct MemStoreConfig {
pub max_blob_size_bytes: u64,
@@ -21,18 +22,31 @@ pub struct MemStoreConfig {
pub put_latency: u64,
}

/// Configuration for the EigenDA remote disperser client.
#[derive(Clone, Debug, PartialEq, Deserialize, Default)]
pub struct DisperserConfig {
/// URL of the Disperser RPC server
pub disperser_rpc: String,
/// Block height that must be reached before the blob is considered finalized;
/// a value less than or equal to 0 means the disperser will not wait for finalization
pub eth_confirmation_depth: i32,
/// URL of the Ethereum RPC server
pub eigenda_eth_rpc: String,
/// Address of the service manager contract
pub eigenda_svc_manager_address: String,
/// Maximum size permitted for a blob in bytes
pub blob_size_limit: u32,
/// Maximum amount of time in milliseconds to wait for a status query response
pub status_query_timeout: u64,
/// Interval in milliseconds to query the status of a blob
pub status_query_interval: u64,
/// Wait for the blob to be finalized before returning the response
pub wait_for_finalization: bool,
/// Authenticated dispersal
pub authenticated: bool,
/// Verify the certificate of dispatched blobs
pub verify_cert: bool,
/// Path to the file containing the points used for KZG
pub path_to_points: String,
}
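These fields together describe a single disperser deployment. A minimal sketch of building the config in code, where the endpoint, address, and numeric values are illustrative placeholders rather than project defaults:

```rust
use zksync_config::configs::da_client::eigen::DisperserConfig;

// Illustrative values only; a real node reads these from its configuration sources.
fn example_disperser_config() -> DisperserConfig {
    DisperserConfig {
        disperser_rpc: "https://disperser.example:443".to_string(),
        eth_confirmation_depth: 0, // <= 0: do not wait for finalization
        eigenda_eth_rpc: "https://eth-rpc.example".to_string(),
        eigenda_svc_manager_address: "0x0000000000000000000000000000000000000000".to_string(),
        blob_size_limit: 2 * 1024 * 1024, // bytes
        status_query_timeout: 1_800_000,  // milliseconds
        status_query_interval: 5_000,     // milliseconds
        wait_for_finalization: false,
        authenticated: false,
        verify_cert: false,
        path_to_points: "./resources".to_string(),
    }
}
```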

52 changes: 19 additions & 33 deletions core/lib/protobuf_config/src/da_client.rs
@@ -57,28 +57,21 @@ impl ProtoRepr for proto::DataAvailabilityClient {
let eigen_config = match config {
proto::eigen_config::Config::MemStore(conf) => {
EigenConfig::MemStore(MemStoreConfig {
max_blob_size_bytes: required(&conf.max_blob_size_bytes)
.context("max_blob_size_bytes")?
.clone(),
blob_expiration: required(&conf.blob_expiration)
.context("blob_expiration")?
.clone(),
get_latency: required(&conf.get_latency)
.context("get_latency")?
.clone(),
put_latency: required(&conf.put_latency)
.context("put_latency")?
.clone(),
max_blob_size_bytes: *required(&conf.max_blob_size_bytes)
.context("max_blob_size_bytes")?,
blob_expiration: *required(&conf.blob_expiration)
.context("blob_expiration")?,
get_latency: *required(&conf.get_latency).context("get_latency")?,
put_latency: *required(&conf.put_latency).context("put_latency")?,
})
}
proto::eigen_config::Config::Disperser(conf) => {
EigenConfig::Disperser(DisperserConfig {
disperser_rpc: required(&conf.disperser_rpc)
.context("disperser_rpc")?
.clone(),
eth_confirmation_depth: required(&conf.eth_confirmation_depth)
.context("eth_confirmation_depth")?
.clone(),
eth_confirmation_depth: *required(&conf.eth_confirmation_depth)
.context("eth_confirmation_depth")?,
eigenda_eth_rpc: required(&conf.eigenda_eth_rpc)
.context("eigenda_eth_rpc")?
.clone(),
@@ -87,24 +80,17 @@ impl ProtoRepr for proto::DataAvailabilityClient {
)
.context("eigenda_svc_manager_address")?
.clone(),
blob_size_limit: required(&conf.blob_size_limit)
.context("blob_size_limit")?
.clone(),
status_query_timeout: required(&conf.status_query_timeout)
.context("status_query_timeout")?
.clone(),
status_query_interval: required(&conf.status_query_interval)
.context("status_query_interval")?
.clone(),
wait_for_finalization: required(&conf.wait_for_finalization)
.context("wait_for_finalization")?
.clone(),
authenticated: required(&conf.authenticated)
.context("authenticated")?
.clone(),
verify_cert: required(&conf.verify_cert)
.context("verify_cert")?
.clone(),
blob_size_limit: *required(&conf.blob_size_limit)
.context("blob_size_limit")?,
status_query_timeout: *required(&conf.status_query_timeout)
.context("status_query_timeout")?,
status_query_interval: *required(&conf.status_query_interval)
.context("status_query_interval")?,
wait_for_finalization: *required(&conf.wait_for_finalization)
.context("wait_for_finalization")?,
authenticated: *required(&conf.authenticated)
.context("authenticated")?,
verify_cert: *required(&conf.verify_cert).context("verify_cert")?,
path_to_points: required(&conf.path_to_points)
.context("path_to_points")?
.clone(),
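The da_client.rs hunks are all the same clippy fix (clippy::clone_on_copy): for scalar fields whose types are Copy, the reference returned by the required helper can simply be dereferenced instead of cloned. A self-contained sketch of the pattern, where required is a simplified stand-in rather than the actual zksync_protobuf helper:

```rust
use anyhow::Context as _;

// Simplified stand-in for the `required` helper: return a reference to the
// proto field's value, or an error if the optional field is missing.
fn required<T>(field: &Option<T>) -> anyhow::Result<&T> {
    field.as_ref().ok_or_else(|| anyhow::anyhow!("missing field"))
}

fn parse_max_blob_size(field: &Option<u64>) -> anyhow::Result<u64> {
    // Before: required(field).context("max_blob_size_bytes")?.clone()  // clippy::clone_on_copy
    // After: `u64` is Copy, so a plain dereference is enough.
    Ok(*required(field).context("max_blob_size_bytes")?)
}
```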
27 changes: 13 additions & 14 deletions core/node/da_clients/src/eigen/blob_info.rs
@@ -32,7 +32,7 @@ pub struct G1Commitment {
}

impl G1Commitment {
pub fn into_bytes(&self) -> Vec<u8> {
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![];
bytes.extend(&self.x.len().to_be_bytes());
bytes.extend(&self.x);
@@ -78,7 +78,7 @@ pub struct BlobQuorumParam {
}

impl BlobQuorumParam {
pub fn into_bytes(&self) -> Vec<u8> {
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![];
bytes.extend(&self.quorum_number.to_be_bytes());
bytes.extend(&self.adversary_threshold_percentage.to_be_bytes());
@@ -129,14 +129,14 @@ pub struct BlobHeader {
}

impl BlobHeader {
pub fn into_bytes(&self) -> Vec<u8> {
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![];
bytes.extend(self.commitment.into_bytes());
bytes.extend(self.commitment.to_bytes());
bytes.extend(&self.data_length.to_be_bytes());
bytes.extend(&self.blob_quorum_params.len().to_be_bytes());

for quorum in &self.blob_quorum_params {
bytes.extend(quorum.into_bytes());
bytes.extend(quorum.to_bytes());
}

bytes
@@ -177,7 +177,6 @@ impl TryFrom<DisperserBlobHeader> for BlobHeader {
.iter()
.map(|param| BlobQuorumParam::from(param.clone()))
.collect();
let blob_quorum_params = blob_quorum_params;
Ok(Self {
commitment: G1Commitment::from(value.commitment.unwrap()),
data_length: value.data_length,
@@ -195,7 +194,7 @@ pub struct BatchHeader {
}

impl BatchHeader {
pub fn into_bytes(&self) -> Vec<u8> {
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![];
bytes.extend(&self.batch_root.len().to_be_bytes());
bytes.extend(&self.batch_root);
@@ -251,9 +250,9 @@ pub struct BatchMetadata {
}

impl BatchMetadata {
pub fn into_bytes(&self) -> Vec<u8> {
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![];
bytes.extend(self.batch_header.into_bytes());
bytes.extend(self.batch_header.to_bytes());
bytes.extend(&self.signatory_record_hash);
bytes.extend(&self.confirmation_block_number.to_be_bytes());

@@ -312,11 +311,11 @@ pub struct BlobVerificationProof {
}

impl BlobVerificationProof {
pub fn into_bytes(&self) -> Vec<u8> {
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![];
bytes.extend(&self.batch_id.to_be_bytes());
bytes.extend(&self.blob_index.to_be_bytes());
bytes.extend(self.batch_medatada.into_bytes());
bytes.extend(self.batch_medatada.to_bytes());
bytes.extend(&self.inclusion_proof.len().to_be_bytes());
bytes.extend(&self.inclusion_proof);
bytes.extend(&self.quorum_indexes.len().to_be_bytes());
@@ -372,12 +371,12 @@ pub struct BlobInfo {
}

impl BlobInfo {
pub fn into_bytes(&self) -> Vec<u8> {
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![];
let blob_header_bytes = self.blob_header.into_bytes();
let blob_header_bytes = self.blob_header.to_bytes();
bytes.extend(blob_header_bytes.len().to_be_bytes());
bytes.extend(blob_header_bytes);
let blob_verification_proof_bytes = self.blob_verification_proof.into_bytes();
let blob_verification_proof_bytes = self.blob_verification_proof.to_bytes();
bytes.extend(blob_verification_proof_bytes);
bytes
}
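The renames in blob_info.rs follow clippy's wrong_self_convention lint: a method named into_* is expected to consume self, while these serializers only borrow it, so to_* is the conventional prefix. A trimmed illustration on a stand-in type (not the actual G1Commitment):

```rust
struct Commitment {
    x: Vec<u8>,
}

impl Commitment {
    // Borrows the receiver, so `to_*` is the idiomatic prefix.
    fn to_bytes(&self) -> Vec<u8> {
        let mut bytes = Vec::new();
        bytes.extend(&self.x.len().to_be_bytes());
        bytes.extend(&self.x);
        bytes
    }

    // A consuming serializer would keep the `into_*` prefix.
    fn into_bytes(self) -> Vec<u8> {
        self.x
    }
}
```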
6 changes: 5 additions & 1 deletion core/node/da_clients/src/eigen/client.rs
@@ -13,6 +13,10 @@ use zksync_da_client::{
use super::{blob_info::BlobInfo, memstore::MemStore, sdk::RawEigenClient, Disperser};
use crate::utils::to_non_retriable_da_error;

/// EigenClient is a client for the Eigen DA service.
/// It can be configured to use one of two dispersal methods:
/// - Remote: Dispatch blobs to a remote Eigen service.
/// - Memstore: Stores blobs in memory, used for testing purposes.
#[derive(Debug, Clone)]
pub struct EigenClient {
client: Disperser,
@@ -105,12 +109,12 @@ impl EigenClient {
}
#[cfg(test)]
mod tests {
use serial_test::serial;
use zksync_config::configs::da_client::eigen::{DisperserConfig, MemStoreConfig};
use zksync_types::secrets::PrivateKey;

use super::*;
use crate::eigen::blob_info::BlobInfo;
use serial_test::serial;

#[tokio::test]
#[serial]
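The new EigenClient doc comment says the dispersal method is chosen by configuration; in terms of the EigenConfig enum touched earlier in this PR, that choice is a match on the variant. A sketch, assuming the enum has exactly the two variants visible here (MemStore and Disperser) and using an illustrative helper that is not part of the client API:

```rust
use zksync_config::configs::da_client::eigen::EigenConfig;

// Illustrative helper: name the dispersal mode selected by the config variant.
fn dispersal_mode(config: &EigenConfig) -> &'static str {
    match config {
        EigenConfig::MemStore(_) => "memstore (in-memory, for tests)",
        EigenConfig::Disperser(_) => "remote EigenDA disperser",
    }
}
```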
10 changes: 10 additions & 0 deletions core/node/da_clients/src/eigen/generated/eigendaservicemanager.rs
@@ -1,3 +1,12 @@
// Disable clippy checks on autogenerated code
#![allow(
dead_code,
clippy::identity_op,
clippy::useless_conversion,
clippy::clone_on_copy,
clippy::needless_lifetimes,
clippy::type_complexity
)]
/**

Generated by the following Solidity interface...
@@ -1237,6 +1246,7 @@ interface EigenDAServiceManager {
pub mod EigenDAServiceManager {
use alloy::sol_types as alloy_sol_types;

#[allow(unused_imports)]
use super::*;
/// The creation / init bytecode of the contract.
///
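The inner #![allow(...)] above silences the listed lints for the whole generated file. An equivalent option, not used in this PR, is an outer attribute on the module declaration that pulls the file in, so the exception survives regeneration; a sketch with the module name assumed from the file path:

```rust
// In the parent module (sketch): scope the lint exceptions to the generated code only.
#[allow(
    dead_code,
    clippy::identity_op,
    clippy::useless_conversion,
    clippy::clone_on_copy,
    clippy::needless_lifetimes,
    clippy::type_complexity
)]
mod eigendaservicemanager;
```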
12 changes: 6 additions & 6 deletions core/node/da_clients/src/eigen/memstore.rs
@@ -9,7 +9,7 @@ use rand::{rngs::OsRng, Rng, RngCore};
use sha3::{Digest, Keccak256};
use tokio::time::interval;
use zksync_config::configs::da_client::eigen::MemStoreConfig;
use zksync_da_client::types::{DAError, DispatchResponse, InclusionData};
use zksync_da_client::types::{DAError, InclusionData};

use super::blob_info::{self, BlobInfo};

@@ -22,9 +22,9 @@ pub enum MemStoreError {
BlobNotFound,
}

impl Into<Error> for MemStoreError {
fn into(self) -> Error {
match self {
impl From<MemStoreError> for Error {
fn from(val: MemStoreError) -> Self {
match val {
MemStoreError::BlobToLarge => Error::msg("Blob too large"),
MemStoreError::BlobAlreadyExists => Error::msg("Blob already exists"),
MemStoreError::IncorrectCommitment => Error::msg("Incorrect commitment"),
@@ -68,14 +68,14 @@ impl MemStore {
pub async fn put_blob(self: Arc<Self>, value: Vec<u8>) -> Result<String, MemStoreError> {
tokio::time::sleep(Duration::from_millis(self.config.put_latency)).await;
if value.len() as u64 > self.config.max_blob_size_bytes {
return Err(MemStoreError::BlobToLarge.into());
return Err(MemStoreError::BlobToLarge);
}

let mut entropy = [0u8; 10];
OsRng.fill_bytes(&mut entropy);

let mut hasher = Keccak256::new();
hasher.update(&entropy);
hasher.update(entropy);
let mock_batch_root = hasher.finalize().to_vec();

let block_num = OsRng.gen_range(0u32..1000);
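Replacing impl Into<Error> with impl From<MemStoreError> for Error follows clippy's from_over_into lint: implementing From also provides Into through the standard blanket impl and lets ? and .into() convert the error. A condensed sketch with a single variant rather than the full error enum:

```rust
use anyhow::Error;

#[derive(Debug)]
enum MemStoreError {
    BlobNotFound,
}

// Implement `From` (rather than `Into`); `Into<Error>` comes for free.
impl From<MemStoreError> for Error {
    fn from(val: MemStoreError) -> Self {
        match val {
            MemStoreError::BlobNotFound => Error::msg("Blob not found"),
        }
    }
}

fn lookup(found: bool) -> Result<(), Error> {
    if !found {
        return Err(MemStoreError::BlobNotFound.into()); // uses the `From` impl above
    }
    Ok(())
}
```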
10 changes: 5 additions & 5 deletions core/node/da_clients/src/eigen/sdk.rs
@@ -27,7 +27,7 @@ use crate::eigen::{
};

#[derive(Debug, Clone)]
pub struct RawEigenClient {
pub(crate) struct RawEigenClient {
client: DisperserClient<Channel>,
private_key: SecretKey,
pub config: DisperserConfig,
@@ -40,8 +40,8 @@ impl RawEigenClient {
pub(crate) const BUFFER_SIZE: usize = 1000;

pub async fn new(private_key: SecretKey, config: DisperserConfig) -> anyhow::Result<Self> {
let endpoint = Endpoint::from_str(&config.disperser_rpc.as_str())?
.tls_config(ClientTlsConfig::new())?;
let endpoint =
Endpoint::from_str(config.disperser_rpc.as_str())?.tls_config(ClientTlsConfig::new())?;
let client = DisperserClient::connect(endpoint)
.await
.map_err(|e| anyhow::anyhow!("Failed to connect to Disperser server: {}", e))?;
@@ -338,15 +338,15 @@ impl RawEigenClient {
})?
.into_inner();

if get_response.data.len() == 0 {
if get_response.data.is_empty() {
return Err(DAError {
error: anyhow!("Failed to get blob data"),
is_retriable: false,
});
}
// TODO: remove kzgpad_rs
let data = kzgpad_rs::remove_empty_byte_from_padded_bytes(&get_response.data);
return Ok(Some(data));
Ok(Some(data))
}
}

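The sdk.rs hunks are three more mechanical lint fixes: a redundant borrow dropped from config.disperser_rpc.as_str(), len() == 0 replaced with is_empty(), and a trailing return removed. A compressed illustration of all three on stand-in types:

```rust
struct Config {
    disperser_rpc: String,
}

fn check(config: &Config, data: Vec<u8>) -> Result<Vec<u8>, String> {
    // clippy::needless_borrow: `as_str()` already yields `&str`,
    // so writing `&config.disperser_rpc.as_str()` adds a redundant borrow.
    let _endpoint: &str = config.disperser_rpc.as_str();

    // clippy::len_zero: prefer `is_empty()` over `len() == 0`.
    if data.is_empty() {
        return Err("empty response".to_string());
    }

    // clippy::needless_return: the final expression needs no `return` keyword.
    Ok(data)
}
```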
13 changes: 7 additions & 6 deletions core/node/da_clients/src/eigen/verifier.rs
@@ -30,6 +30,7 @@ pub enum VerificationError {
CommitmentNotOnCorrectSubgroup,
}

/// Configuration for the verifier used for authenticated dispersals
#[derive(Debug, Clone)]
pub struct VerifierConfig {
pub verify_certs: bool,
@@ -153,7 +154,7 @@ impl Verifier {
index: u32,
) -> Result<Vec<u8>, VerificationError> {
let mut index = index;
if proof.len() == 0 || proof.len() % 32 != 0 {
if proof.is_empty() || proof.len() % 32 != 0 {
return Err(VerificationError::WrongProof);
}
let mut computed_hash = leaf.to_vec();
@@ -256,7 +257,7 @@ impl Verifier {
if self.cfg.eth_confirmation_depth == 0 {
return Ok(latest);
}
return Ok(latest - (self.cfg.eth_confirmation_depth as u64 - 1));
Ok(latest - (self.cfg.eth_confirmation_depth as u64 - 1))
}

/// Verifies the certificate batch hash
@@ -405,7 +406,7 @@ mod test {
};
let blob = vec![1u8; 100]; // The actual blob sent was this blob kzg-padded; Blob::from_bytes_and_pad pads it internally, so we don't need to pad it here.
let result = verifier.verify_commitment(commitment, blob);
assert_eq!(result.is_ok(), true);
assert!(result.is_ok());
}

#[test]
@@ -492,7 +493,7 @@ mod test {
},
};
let result = verifier.verify_merkle_proof(cert);
assert_eq!(result.is_ok(), true);
assert!(result.is_ok());
}

#[test]
@@ -648,7 +649,7 @@ mod test {
},
};
let result = verifier.verify_batch(cert).await;
assert_eq!(result.is_ok(), true);
assert!(result.is_ok());
}

#[tokio::test]
@@ -735,6 +736,6 @@ mod test {
},
};
let result = verifier.verify_security_params(cert).await;
assert_eq!(result.is_ok(), true);
assert!(result.is_ok());
}
}
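The test changes in verifier.rs all follow clippy's bool_assert_comparison lint: comparing a bool against true with assert_eq! is redundant when assert! can state it directly. A minimal illustration:

```rust
#[test]
fn result_is_ok() {
    let result: Result<(), ()> = Ok(());
    // Before: assert_eq!(result.is_ok(), true); // clippy::bool_assert_comparison
    assert!(result.is_ok());
}
```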