diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9fde3899..4262f214 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -33,4 +33,6 @@ jobs: uses: actions/checkout@v4 - name: Install stable toolchain uses: dtolnay/rust-toolchain@stable - - run: cargo test --all-features \ No newline at end of file + - run: cargo test --all-features + env: + CB_TESTS_DIR: ${{ runner.temp }} diff --git a/Cargo.lock b/Cargo.lock index a03ee6f2..d9e35119 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,12 +24,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ "cfg-if", - "cipher", + "cipher 0.3.0", "cpufeatures", - "ctr", + "ctr 0.8.0", "opaque-debug", ] +[[package]] +name = "aes" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +dependencies = [ + "cfg-if", + "cipher 0.4.4", + "cpufeatures", +] + [[package]] name = "ahash" version = "0.8.11" @@ -1134,20 +1145,26 @@ dependencies = [ name = "cb-common" version = "0.3.1" dependencies = [ + "aes 0.8.4", "alloy", "axum", + "base64 0.22.1", "bimap", "blst", + "cipher 0.4.4", + "ctr 0.9.2", "derive_more", "eth2_keystore", "ethereum_serde_utils 0.7.0", "eyre", "k256", + "pbkdf2 0.12.2", "rand", "reqwest", "serde", "serde_json", "serde_yaml", + "sha2 0.10.8", "ssz_types", "thiserror", "tokio", @@ -1157,6 +1174,7 @@ dependencies = [ "tracing-subscriber", "tree_hash 0.8.0", "tree_hash_derive", + "unicode-normalization", "url", ] @@ -1272,6 +1290,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + [[package]] name = "clap" version = "4.5.4" @@ -1500,7 +1528,16 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" dependencies = [ - "cipher", + "cipher 0.3.0", +] + +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher 0.4.4", ] [[package]] @@ -1801,12 +1838,12 @@ name = "eth2_keystore" version = "0.1.0" source = "git+https://github.com/sigp/lighthouse?rev=9e12c21f268c80a3f002ae0ca27477f9f512eb6f#9e12c21f268c80a3f002ae0ca27477f9f512eb6f" dependencies = [ - "aes", + "aes 0.7.5", "bls", "eth2_key_derivation", "hex", "hmac 0.11.0", - "pbkdf2", + "pbkdf2 0.8.0", "rand", "scrypt", "serde", @@ -2519,6 +2556,15 @@ dependencies = [ "serde", ] +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -2959,6 +3005,16 @@ dependencies = [ "crypto-mac", ] +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest 0.10.7", + "hmac 0.12.1", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -3463,7 +3519,7 @@ version = 
"0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0" dependencies = [ - "cipher", + "cipher 0.3.0", ] [[package]] @@ -3499,7 +3555,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879588d8f90906e73302547e20fffefdd240eb3e0e744e142321f5d49dea0518" dependencies = [ "hmac 0.11.0", - "pbkdf2", + "pbkdf2 0.8.0", "salsa20", "sha2 0.9.9", ] @@ -4397,9 +4453,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] diff --git a/Cargo.toml b/Cargo.toml index 91198e15..b1457fdc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,8 @@ toml = "0.8.13" serde = { version = "1.0.202", features = ["derive"] } serde_json = "1.0.117" serde_yaml = "0.9.33" +base64 = "0.22.1" +unicode-normalization = "0.1.24" # telemetry tracing = "0.1.40" @@ -68,6 +70,11 @@ tree_hash = "0.8" tree_hash_derive = "0.8" eth2_keystore = { git = "https://github.com/sigp/lighthouse", rev = "9e12c21f268c80a3f002ae0ca27477f9f512eb6f" } k256 = "0.13" +aes = "0.8" +ctr = "0.9.2" +cipher = "0.4" +pbkdf2 = "0.12.2" +sha2 = "0.10.8" # docker docker-compose-types = "0.12.0" diff --git a/config.example.toml b/config.example.toml index 4c019706..b361f3c0 100644 --- a/config.example.toml +++ b/config.example.toml @@ -3,7 +3,7 @@ # Chain spec ID. Supported values: # A network ID. Supported values: Mainnet, Holesky, Sepolia, Helder. -# A path to a chain spec file, either in .json format (e.g., as returned by the beacon endpoint /eth/v1/config/spec), or in .yml format (see examples in tests/data). +# A custom object, e.g., chain = { genesis_time_secs = 1695902400, path = "/path/to/spec.json" }, with a path to a chain spec file, either in .json format (e.g., as returned by the beacon endpoint /eth/v1/config/spec), or in .yml format (see examples in tests/data). # A custom object, e.g., chain = { genesis_time_secs = 1695902400, slot_time_secs = 12, genesis_fork_version = "0x01017000" }. chain = "Holesky" @@ -97,6 +97,32 @@ target_first_request_ms = 200 # OPTIONAL frequency_get_header_ms = 300 +# Configuration for the PBS multiplexers, which enable different configs to be used for get header requests, depending on validator pubkey +# Note that: +# - multiple sets of keys can be defined by adding multiple [[mux]] sections. The validator pubkey sets need to be disjoint +# - the mux is only used for get header requests +# - if any value is missing from the mux config, the default value from the main config will be used +[[mux]] +# Unique ID for the mux config +id = "test_mux" +# Which validator pubkeys to match against this mux config. This can be empty or omitted if a loader is specified. +# Any keys loaded via the loader will be added to this list. 
+validator_pubkeys = [
+  "0x80c7f782b2467c5898c5516a8b6595d75623960b4afc4f71ee07d40985d20e117ba35e7cd352a3e75fb85a8668a3b745",
+  "0xa119589bb33ef52acbb8116832bec2b58fca590fe5c85eac5d3230b44d5bc09fe73ccd21f88eab31d6de16194d17782e",
+]
+# Path to a file containing a list of validator pubkeys
+# OPTIONAL
+loader = "./mux_keys.example.json"
+timeout_get_header_ms = 900
+late_in_slot_time_ms = 1500
+# For each mux, one or more [[mux.relays]] can be defined, which will be used for the matching validator pubkeys
+# Only the relays defined here will be used, and the rest of the relays defined in the main config will be ignored
+# Any field defined here will override the default value from the relay config with the same id in [[relays]]
+[[mux.relays]]
+id = "example-relay"
+headers = { X-MyCustomHeader = "ADifferentCustomValue" }
# Configuration for the Signer Module, only required if any `commit` module is present, or if `pbs.with_signer = true`
# OPTIONAL
[signer]
@@ -105,20 +131,35 @@ frequency_get_header_ms = 300
docker_image = "ghcr.io/commit-boost/signer:latest"
# Configuration for how the Signer module should load validator keys. Currently two types of loaders are supported:
# - File: load keys from a plain text file (unsafe, use only for testing purposes)
-# - ValidatorsDir: load keys from a `keys` and `secrets` folder (ERC-2335 style keystores as used in Lighthouse)
+# - ValidatorsDir: load keys from a `keys` and `secrets` file/folder (ERC-2335 style keystores). More details can be found in the docs (https://commit-boost.github.io/commit-boost-client/get_started/configuration/)
[signer.loader]
# File: path to the keys file
key_path = "./keys.example.json"
-# ValidatorsDir: path to the keys directory
+# ValidatorsDir: format of the keystore (lighthouse, prysm, teku or lodestar)
+# format = "lighthouse"
+# ValidatorsDir: full path to the keys directory
+# For lighthouse, it's the path to the directory where the `/voting-keystore.json` directories are located.
+# For prysm, it's the path to the `all-accounts.keystore.json` file.
+# For teku, it's the path to the directory where all `.json` files are located.
+# For lodestar, it's the path to the directory where all `.json` files are located.
# keys_path = ""
-# ValidatorsDir: path to the secrets directory
+# ValidatorsDir: full path to the secrets file/directory
+# For lighthouse, it's the path to the directory where the `.json` files are located.
+# For prysm, it's the path to the file containing the wallet decryption password.
+# For teku, it's the path to the directory where all `.txt` files are located.
+# For lodestar, it's the path to the file containing the decryption password.
# secrets_path = ""
-# Configuration for how the Signer module should store proxy delegations. Currently one type of store is supported:
+# Configuration for how the Signer module should store proxy delegations. Supported types of store are:
# - File: store keys and delegations from a plain text file (unsafe, use only for testing purposes)
+# - ERC2335: store keys and delegations safely using ERC-2335 style keystores.
More details can be found in the docs (https://commit-boost.github.io/commit-boost-client/get_started/configuration#proxy-keys-store) # OPTIONAL, if missing proxies are lost on restart [signer.store] # File: path to the keys file proxy_dir = "./proxies" +# ERC2335: path to the keys directory +# keys_path = "" +# ERC2335: path to the secrets directory +# secrets_path = "" # Commit-Boost can optionally run "modules" which extend the capabilities of the sidecar. # Currently, two types of modules are supported: diff --git a/configs/custom_chain.toml b/configs/custom_chain.toml new file mode 100644 index 00000000..58c516ad --- /dev/null +++ b/configs/custom_chain.toml @@ -0,0 +1,11 @@ +# PBS config with a custom chain spec file + +# genesis time in seconds needs to be specified +chain = { genesis_time_secs = 100, path = "tests/data/holesky_spec.json" } + +[pbs] +port = 18550 + +[[relays]] +id = "example-relay" +url = "http://0xa1cec75a3f0661e99299274182938151e8433c61a19222347ea1313d839229cb4ce4e3e5aa2bdeb71c8fcf1b084963c2@abc.xyz" diff --git a/configs/pbs-mux.toml b/configs/pbs-mux.toml new file mode 100644 index 00000000..f9efa1d5 --- /dev/null +++ b/configs/pbs-mux.toml @@ -0,0 +1,31 @@ +# PBS config with a mux for a single validator + +chain = "Holesky" + +[pbs] +port = 18550 +timeout_get_header_ms = 950 +late_in_slot_time_ms = 2000 + +[[relays]] +id = "relay-1" +url = "http://0xa1cec75a3f0661e99299274182938151e8433c61a19222347ea1313d839229cb4ce4e3e5aa2bdeb71c8fcf1b084963c2@abc.xyz" + +[[relays]] +id = "relay-2" +url = "http://0xa119589bb33ef52acbb8116832bec2b58fca590fe5c85eac5d3230b44d5bc09fe73ccd21f88eab31d6de16194d17782e@def.xyz" +enable_timing_games = true +target_first_request_ms = 200 + +[[mux]] +id = "test_mux" +validator_pubkeys = [ + "0x80c7f782b2467c5898c5516a8b6595d75623960b4afc4f71ee07d40985d20e117ba35e7cd352a3e75fb85a8668a3b745", +] +loader = "./mux_keys.example.json" +timeout_get_header_ms = 900 +late_in_slot_time_ms = 1500 + +[[mux.relays]] +id = "relay-2" +enable_timing_games = false diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 8da6e7ea..6813d595 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -9,7 +9,8 @@ use cb_common::{ CommitBoostConfig, LogsSettings, ModuleKind, BUILDER_PORT_ENV, BUILDER_URLS_ENV, CHAIN_SPEC_ENV, CONFIG_DEFAULT, CONFIG_ENV, JWTS_ENV, LOGS_DIR_DEFAULT, LOGS_DIR_ENV, METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, PBS_ENDPOINT_ENV, PBS_MODULE_NAME, - PROXY_DIR_DEFAULT, PROXY_DIR_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, + PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, + PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, SIGNER_PORT_ENV, SIGNER_URL_ENV, }, @@ -219,6 +220,17 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> } let mut pbs_envs = IndexMap::from([get_env_val(CONFIG_ENV, CONFIG_DEFAULT)]); + let mut pbs_volumes = vec![config_volume.clone()]; + + if let Some(mux_config) = cb_config.muxes { + for mux in mux_config.muxes.iter() { + if let Some((env_name, actual_path, internal_path)) = mux.loader_env() { + let (key, val) = get_env_val(&env_name, &internal_path); + pbs_envs.insert(key, val); + pbs_volumes.push(Volumes::Simple(format!("{}:{}:ro", actual_path, internal_path))); + } + } + } if let Some((key, val)) = chain_spec_env.clone() { pbs_envs.insert(key, val); 
@@ -251,7 +263,6 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> pbs_envs.insert(key, val); // volumes - let mut pbs_volumes = vec![config_volume.clone()]; pbs_volumes.extend(chain_spec_volume.clone()); pbs_volumes.extend(get_log_volume(&cb_config.logs, PBS_MODULE_NAME)); @@ -319,7 +330,7 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> let (k, v) = get_env_val(SIGNER_KEYS_ENV, SIGNER_DEFAULT); signer_envs.insert(k, v); } - SignerLoader::ValidatorsDir { keys_path, secrets_path } => { + SignerLoader::ValidatorsDir { keys_path, secrets_path, format: _ } => { volumes.push(Volumes::Simple(format!( "{}:{}:ro", keys_path.display(), @@ -349,6 +360,23 @@ pub fn handle_docker_init(config_path: String, output_dir: String) -> Result<()> let (k, v) = get_env_val(PROXY_DIR_ENV, PROXY_DIR_DEFAULT); signer_envs.insert(k, v); } + ProxyStore::ERC2335 { keys_path, secrets_path } => { + volumes.push(Volumes::Simple(format!( + "{}:{}:rw", + keys_path.display(), + PROXY_DIR_KEYS_DEFAULT + ))); + let (k, v) = get_env_val(PROXY_DIR_KEYS_ENV, PROXY_DIR_KEYS_DEFAULT); + signer_envs.insert(k, v); + + volumes.push(Volumes::Simple(format!( + "{}:{}:rw", + secrets_path.display(), + PROXY_DIR_SECRETS_DEFAULT + ))); + let (k, v) = get_env_val(PROXY_DIR_SECRETS_ENV, PROXY_DIR_SECRETS_DEFAULT); + signer_envs.insert(k, v); + } } } diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 90e5df64..5cb8a410 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -7,7 +7,7 @@ publish = false [dependencies] # ethereum -alloy = { workspace = true } +alloy.workspace = true ssz_types.workspace = true ethereum_serde_utils.workspace = true @@ -35,6 +35,11 @@ tree_hash.workspace = true tree_hash_derive.workspace = true eth2_keystore.workspace = true k256.workspace = true +aes.workspace = true +ctr.workspace = true +cipher.workspace = true +pbkdf2.workspace = true +sha2.workspace = true # misc thiserror.workspace = true @@ -43,3 +48,6 @@ url.workspace = true rand.workspace = true bimap.workspace = true derive_more.workspace = true + +unicode-normalization.workspace = true +base64.workspace = true diff --git a/crates/common/src/commit/request.rs b/crates/common/src/commit/request.rs index f2adeba2..7f6ce49e 100644 --- a/crates/common/src/commit/request.rs +++ b/crates/common/src/commit/request.rs @@ -1,4 +1,7 @@ -use std::fmt::{self, Debug, Display, LowerHex}; +use std::{ + fmt::{self, Debug, Display, LowerHex}, + str::FromStr, +}; use alloy::rpc::types::beacon::BlsSignature; use derive_more::derive::From; @@ -133,6 +136,27 @@ pub enum EncryptionScheme { Ecdsa, } +impl Display for EncryptionScheme { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + EncryptionScheme::Bls => write!(f, "bls"), + EncryptionScheme::Ecdsa => write!(f, "ecdsa"), + } + } +} + +impl FromStr for EncryptionScheme { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "bls" => Ok(EncryptionScheme::Bls), + "ecdsa" => Ok(EncryptionScheme::Ecdsa), + _ => Err(format!("Unknown scheme: {s}")), + } + } +} + // TODO(David): This struct shouldn't be visible to module authors #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GenerateProxyRequest { diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 123df0ad..03d990e8 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -25,6 +25,8 @@ pub const BUILDER_URLS_ENV: &str = 
"CB_BUILDER_URLS"; /// Where to receive BuilderAPI calls from beacon node pub const PBS_ENDPOINT_ENV: &str = "CB_PBS_ENDPOINT"; +pub const MUX_PATH_ENV: &str = "CB_MUX_PATH"; + ///////////////////////// SIGNER ///////////////////////// pub const SIGNER_IMAGE_DEFAULT: &str = "ghcr.io/commit-boost/signer:latest"; @@ -45,9 +47,15 @@ pub const SIGNER_DIR_KEYS_DEFAULT: &str = "/keys"; /// Path to `secrets` folder pub const SIGNER_DIR_SECRETS_ENV: &str = "CB_SIGNER_LOADER_SECRETS_DIR"; pub const SIGNER_DIR_SECRETS_DEFAULT: &str = "/secrets"; -/// Path to store proxies +/// Path to store proxies with plaintext keys (testing only) pub const PROXY_DIR_ENV: &str = "CB_PROXY_STORE_DIR"; pub const PROXY_DIR_DEFAULT: &str = "/proxies"; +/// Path to store proxy keys +pub const PROXY_DIR_KEYS_ENV: &str = "CB_PROXY_KEYS_DIR"; +pub const PROXY_DIR_KEYS_DEFAULT: &str = "/proxy_keys"; +/// Path to store proxy secrets +pub const PROXY_DIR_SECRETS_ENV: &str = "CB_PROXY_SECRETS_DIR"; +pub const PROXY_DIR_SECRETS_DEFAULT: &str = "/proxy_secrets"; ///////////////////////// MODULES ///////////////////////// diff --git a/crates/common/src/config/mod.rs b/crates/common/src/config/mod.rs index ac537cbd..70acfadd 100644 --- a/crates/common/src/config/mod.rs +++ b/crates/common/src/config/mod.rs @@ -3,12 +3,13 @@ use std::path::PathBuf; use eyre::Result; use serde::{Deserialize, Serialize}; -use crate::types::{load_chain_from_file, Chain, ChainLoader}; +use crate::types::{load_chain_from_file, Chain, ChainLoader, ForkVersion}; mod constants; mod log; mod metrics; mod module; +mod mux; mod pbs; mod signer; mod utils; @@ -17,6 +18,7 @@ pub use constants::*; pub use log::*; pub use metrics::*; pub use module::*; +pub use mux::*; pub use pbs::*; pub use signer::*; pub use utils::*; @@ -26,6 +28,8 @@ pub struct CommitBoostConfig { pub chain: Chain, pub relays: Vec, pub pbs: StaticPbsConfig, + #[serde(flatten)] + pub muxes: Option, pub modules: Option>, pub signer: Option, pub metrics: Option, @@ -48,22 +52,35 @@ impl CommitBoostConfig { // When loading the config from the environment, it's important that every path // is replaced with the correct value if the config is loaded inside a container pub fn from_env_path() -> Result { - let config = if let Some(path) = load_optional_env_var(CHAIN_SPEC_ENV) { - // if the chain spec file is set, load it separately - let chain: Chain = load_chain_from_file(path.parse()?)?; - let rest_config: HelperConfig = load_file_from_env(CONFIG_ENV)?; + let helper_config: HelperConfig = load_file_from_env(CONFIG_ENV)?; - CommitBoostConfig { - chain, - relays: rest_config.relays, - pbs: rest_config.pbs, - modules: rest_config.modules, - signer: rest_config.signer, - metrics: rest_config.metrics, - logs: rest_config.logs, + let chain = match helper_config.chain { + ChainLoader::Path { path, genesis_time_secs } => { + // check if the file path is overridden by env var + let (slot_time_secs, genesis_fork_version) = + if let Some(path) = load_optional_env_var(CHAIN_SPEC_ENV) { + load_chain_from_file(path.parse()?)? + } else { + load_chain_from_file(path)? + }; + Chain::Custom { genesis_time_secs, slot_time_secs, genesis_fork_version } } - } else { - load_file_from_env(CONFIG_ENV)? 
+ ChainLoader::Known(known) => Chain::from(known), + ChainLoader::Custom { genesis_time_secs, slot_time_secs, genesis_fork_version } => { + let genesis_fork_version: ForkVersion = genesis_fork_version.as_ref().try_into()?; + Chain::Custom { genesis_time_secs, slot_time_secs, genesis_fork_version } + } + }; + + let config = CommitBoostConfig { + chain, + relays: helper_config.relays, + pbs: helper_config.pbs, + muxes: helper_config.muxes, + modules: helper_config.modules, + signer: helper_config.signer, + metrics: helper_config.metrics, + logs: helper_config.logs, }; config.validate()?; @@ -74,8 +91,8 @@ impl CommitBoostConfig { pub fn chain_spec_file(path: &str) -> Option { match load_from_file::(path) { Ok(config) => { - if let ChainLoader::Path(path_buf) = config.chain { - Some(path_buf) + if let ChainLoader::Path { path, genesis_time_secs: _ } = config.chain { + Some(path) } else { None } @@ -94,8 +111,11 @@ struct ChainConfig { /// Helper struct to load the rest of the config #[derive(Deserialize)] struct HelperConfig { + chain: ChainLoader, relays: Vec, pbs: StaticPbsConfig, + #[serde(flatten)] + muxes: Option, modules: Option>, signer: Option, metrics: Option, diff --git a/crates/common/src/config/mux.rs b/crates/common/src/config/mux.rs new file mode 100644 index 00000000..17ab084b --- /dev/null +++ b/crates/common/src/config/mux.rs @@ -0,0 +1,206 @@ +use std::{ + collections::{HashMap, HashSet}, + path::{Path, PathBuf}, + sync::Arc, +}; + +use alloy::rpc::types::beacon::BlsPublicKey; +use eyre::{bail, ensure, eyre, Context}; +use serde::{Deserialize, Serialize}; + +use super::{load_optional_env_var, PbsConfig, RelayConfig, MUX_PATH_ENV}; +use crate::pbs::{RelayClient, RelayEntry}; + +#[derive(Debug, Deserialize, Serialize)] +pub struct PbsMuxes { + /// List of PBS multiplexers + #[serde(rename = "mux")] + pub muxes: Vec, +} + +#[derive(Debug, Clone)] +pub struct RuntimeMuxConfig { + pub id: String, + pub config: Arc, + pub relays: Vec, +} + +impl PbsMuxes { + pub fn validate_and_fill( + self, + default_pbs: &PbsConfig, + default_relays: &[RelayConfig], + ) -> eyre::Result> { + let mut muxes = self.muxes; + + for mux in muxes.iter_mut() { + if let Some(loader) = &mux.loader { + let extra_keys = loader.load(&mux.id)?; + mux.validator_pubkeys.extend(extra_keys); + } + } + + // check that validator pubkeys are in disjoint sets + let mut unique_pubkeys = HashSet::new(); + for mux in muxes.iter() { + for pubkey in mux.validator_pubkeys.iter() { + if !unique_pubkeys.insert(pubkey) { + bail!("duplicate validator pubkey in muxes: {pubkey}"); + } + } + } + + let mut configs = HashMap::new(); + // fill the configs using the default pbs config and relay entries + for mux in muxes { + ensure!(!mux.relays.is_empty(), "mux config {} must have at least one relay", mux.id); + ensure!( + !mux.validator_pubkeys.is_empty(), + "mux config {} must have at least one validator pubkey", + mux.id + ); + + let mut relay_clients = Vec::with_capacity(mux.relays.len()); + for partial_relay in mux.relays.into_iter() { + // create a new config overriding only the missing fields + let partial_id = partial_relay.id()?; + // assume that there is always a relay defined in the default config. 
If this + // becomes too much of a burden, we can change this to allow defining relays + // that are exclusively used by a mux + let default_relay = default_relays + .iter() + .find(|r| r.id() == partial_id) + .ok_or_else(|| eyre!("default relay config not found for: {}", partial_id))?; + + let full_config = RelayConfig { + id: Some(partial_id.to_string()), + entry: partial_relay.entry.unwrap_or(default_relay.entry.clone()), + headers: partial_relay.headers.or(default_relay.headers.clone()), + enable_timing_games: partial_relay + .enable_timing_games + .unwrap_or(default_relay.enable_timing_games), + target_first_request_ms: partial_relay + .target_first_request_ms + .or(default_relay.target_first_request_ms), + frequency_get_header_ms: partial_relay + .frequency_get_header_ms + .or(default_relay.frequency_get_header_ms), + }; + + relay_clients.push(RelayClient::new(full_config)?); + } + + let config = PbsConfig { + timeout_get_header_ms: mux + .timeout_get_header_ms + .unwrap_or(default_pbs.timeout_get_header_ms), + late_in_slot_time_ms: mux + .late_in_slot_time_ms + .unwrap_or(default_pbs.late_in_slot_time_ms), + ..default_pbs.clone() + }; + let config = Arc::new(config); + + let runtime_config = RuntimeMuxConfig { id: mux.id, config, relays: relay_clients }; + for pubkey in mux.validator_pubkeys.iter() { + configs.insert(*pubkey, runtime_config.clone()); + } + } + + Ok(configs) + } +} + +/// Configuration for the PBS Multiplexer +#[derive(Debug, Deserialize, Serialize)] +pub struct MuxConfig { + /// Identifier for this mux config + pub id: String, + /// Relays to use for this mux config + pub relays: Vec, + /// Which validator pubkeys to match against this mux config + #[serde(default)] + pub validator_pubkeys: Vec, + /// Loader for extra validator pubkeys + pub loader: Option, + pub timeout_get_header_ms: Option, + pub late_in_slot_time_ms: Option, +} + +impl MuxConfig { + /// Returns the env, actual path, and internal path to use for the loader + pub fn loader_env(&self) -> Option<(String, String, String)> { + self.loader.as_ref().map(|loader| match loader { + MuxKeysLoader::File(path_buf) => { + let path = + path_buf.to_str().unwrap_or_else(|| panic!("invalid path: {:?}", path_buf)); + let internal_path = get_mux_path(&self.id); + + (get_mux_env(&self.id), path.to_owned(), internal_path) + } + }) + } +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +/// A relay config with all optional fields. See [`RelayConfig`] for the +/// description of the fields. 
+pub struct PartialRelayConfig { + pub id: Option, + #[serde(rename = "url")] + pub entry: Option, + pub headers: Option>, + pub enable_timing_games: Option, + pub target_first_request_ms: Option, + pub frequency_get_header_ms: Option, +} + +impl PartialRelayConfig { + pub fn id(&self) -> eyre::Result<&str> { + match &self.id { + Some(id) => Ok(id.as_str()), + None => { + let entry = self.entry.as_ref().ok_or_else(|| { + eyre!("relays in [[mux]] need to specify either an `id` or a `url`") + })?; + Ok(entry.id.as_str()) + } + } + } +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(untagged)] +pub enum MuxKeysLoader { + /// A file containing a list of validator pubkeys + File(PathBuf), +} + +impl MuxKeysLoader { + pub fn load(&self, mux_id: &str) -> eyre::Result> { + match self { + Self::File(config_path) => { + // First try loading from env + let path: PathBuf = load_optional_env_var(&get_mux_env(mux_id)) + .map(PathBuf::from) + .unwrap_or(config_path.clone()); + let file = load_file(path)?; + serde_json::from_str(&file).wrap_err("failed to parse mux keys file") + } + } + } +} + +fn load_file + std::fmt::Debug>(path: P) -> eyre::Result { + std::fs::read_to_string(&path).wrap_err(format!("Unable to find mux keys file: {path:?}")) +} + +/// A different env var for each mux +fn get_mux_env(mux_id: &str) -> String { + format!("{MUX_PATH_ENV}_{mux_id}") +} + +/// Path to the mux file +fn get_mux_path(mux_id: &str) -> String { + format!("/{mux_id}-mux_keys.json") +} diff --git a/crates/common/src/config/pbs.rs b/crates/common/src/config/pbs.rs index 3b36d40f..0144ce88 100644 --- a/crates/common/src/config/pbs.rs +++ b/crates/common/src/config/pbs.rs @@ -6,17 +6,23 @@ use std::{ sync::Arc, }; -use alloy::primitives::{utils::format_ether, U256}; +use alloy::{ + primitives::{utils::format_ether, U256}, + rpc::types::beacon::BlsPublicKey, +}; use eyre::{ensure, Result}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use url::Url; use super::{ - constants::PBS_IMAGE_DEFAULT, load_optional_env_var, CommitBoostConfig, PBS_ENDPOINT_ENV, + constants::PBS_IMAGE_DEFAULT, load_optional_env_var, CommitBoostConfig, RuntimeMuxConfig, + PBS_ENDPOINT_ENV, }; use crate::{ commit::client::SignerClient, - config::{load_env_var, load_file_from_env, CONFIG_ENV, MODULE_JWT_ENV, SIGNER_URL_ENV}, + config::{ + load_env_var, load_file_from_env, PbsMuxes, CONFIG_ENV, MODULE_JWT_ENV, SIGNER_URL_ENV, + }, pbs::{ BuilderEventPublisher, DefaultTimeout, RelayClient, RelayEntry, DEFAULT_PBS_PORT, LATE_IN_SLOT_TIME_MS, @@ -45,6 +51,12 @@ pub struct RelayConfig { pub frequency_get_header_ms: Option, } +impl RelayConfig { + pub fn id(&self) -> &str { + self.id.as_deref().unwrap_or(self.entry.id.as_str()) + } +} + #[derive(Debug, Clone, Deserialize, Serialize)] pub struct PbsConfig { /// Host to receive BuilderAPI calls from beacon node @@ -149,6 +161,8 @@ pub struct PbsModuleConfig { pub signer_client: Option, /// Event publisher pub event_publisher: Option, + /// Muxes config + pub muxes: Option>, } fn default_pbs() -> String { @@ -158,6 +172,7 @@ fn default_pbs() -> String { /// Loads the default pbs config, i.e.
with no signer client or custom data pub fn load_pbs_config() -> Result { let config = CommitBoostConfig::from_env_path()?; + config.validate()?; // use endpoint from env if set, otherwise use default host and port let endpoint = if let Some(endpoint) = load_optional_env_var(PBS_ENDPOINT_ENV) { @@ -166,6 +181,11 @@ pub fn load_pbs_config() -> Result { SocketAddr::from((config.pbs.pbs_config.host, config.pbs.pbs_config.port)) }; + let muxes = config + .muxes + .map(|muxes| muxes.validate_and_fill(&config.pbs.pbs_config, &config.relays)) + .transpose()?; + let relay_clients = config.relays.into_iter().map(RelayClient::new).collect::>>()?; let maybe_publiher = BuilderEventPublisher::new_from_env()?; @@ -177,6 +197,7 @@ pub fn load_pbs_config() -> Result { relays: relay_clients, signer_client: None, event_publisher: maybe_publiher, + muxes, }) } @@ -195,6 +216,7 @@ pub fn load_pbs_custom_config() -> Result<(PbsModuleConfig, chain: Chain, relays: Vec, pbs: CustomPbsConfig, + muxes: Option, } // load module config including the extra data (if any) @@ -211,6 +233,13 @@ pub fn load_pbs_custom_config() -> Result<(PbsModuleConfig, )) }; + let muxes = match cb_config.muxes { + Some(muxes) => Some( + muxes.validate_and_fill(&cb_config.pbs.static_config.pbs_config, &cb_config.relays)?, + ), + None => None, + }; + let relay_clients = cb_config.relays.into_iter().map(RelayClient::new).collect::>>()?; let maybe_publiher = BuilderEventPublisher::new_from_env()?; @@ -232,6 +261,7 @@ pub fn load_pbs_custom_config() -> Result<(PbsModuleConfig, relays: relay_clients, signer_client, event_publisher: maybe_publiher, + muxes, }, cb_config.pbs.extra, )) diff --git a/crates/common/src/pbs/relay.rs b/crates/common/src/pbs/relay.rs index 3a0702e1..db18466a 100644 --- a/crates/common/src/pbs/relay.rs +++ b/crates/common/src/pbs/relay.rs @@ -19,7 +19,7 @@ use crate::{config::RelayConfig, DEFAULT_REQUEST_TIMEOUT}; /// A parsed entry of the relay url in the format: scheme://pubkey@host #[derive(Debug, Clone)] pub struct RelayEntry { - /// Default if of the relay, the hostname of the url + /// Default ID of the relay, the hostname of the url pub id: String, /// Public key of the relay pub pubkey: BlsPublicKey, @@ -42,8 +42,9 @@ impl<'de> Deserialize<'de> for RelayEntry { D: serde::Deserializer<'de>, { let url = Url::deserialize(deserializer)?; - let pubkey = BlsPublicKey::from_hex(url.username()).map_err(serde::de::Error::custom)?; let id = url.host().ok_or(serde::de::Error::custom("missing host"))?.to_string(); + let pubkey = BlsPublicKey::from_hex(url.username()) + .map_err(|_| serde::de::Error::custom("invalid BLS pubkey"))?; Ok(RelayEntry { pubkey, url, id }) } @@ -79,11 +80,7 @@ impl RelayClient { .timeout(DEFAULT_REQUEST_TIMEOUT) .build()?; - Ok(Self { - id: Arc::new(config.id.clone().unwrap_or(config.entry.id.clone())), - client, - config: Arc::new(config), - }) + Ok(Self { id: Arc::new(config.id().to_owned()), client, config: Arc::new(config) }) } pub fn pubkey(&self) -> BlsPublicKey { diff --git a/crates/common/src/pbs/types/get_header.rs b/crates/common/src/pbs/types/get_header.rs index f8591a24..ebffa946 100644 --- a/crates/common/src/pbs/types/get_header.rs +++ b/crates/common/src/pbs/types/get_header.rs @@ -12,8 +12,11 @@ use super::{ #[derive(Debug, Serialize, Deserialize, Clone, Copy)] pub struct GetHeaderParams { + /// The slot to request the header for pub slot: u64, + /// The parent hash of the block to request the header for pub parent_hash: B256, + /// The pubkey of the validator that is requesting the 
header pub pubkey: BlsPublicKey, } diff --git a/crates/common/src/pbs/types/kzg.rs b/crates/common/src/pbs/types/kzg.rs index c1ff15a2..e5b3fe6f 100644 --- a/crates/common/src/pbs/types/kzg.rs +++ b/crates/common/src/pbs/types/kzg.rs @@ -94,7 +94,7 @@ impl FromStr for KzgCommitment { } // PROOF -const BYTES_PER_PROOF: usize = 48; +pub const BYTES_PER_PROOF: usize = 48; #[derive(Debug, Clone)] pub struct KzgProof(pub [u8; BYTES_PER_PROOF]); diff --git a/crates/common/src/pbs/types/mod.rs b/crates/common/src/pbs/types/mod.rs index aacefbed..f4a2f175 100644 --- a/crates/common/src/pbs/types/mod.rs +++ b/crates/common/src/pbs/types/mod.rs @@ -7,8 +7,17 @@ mod kzg; mod spec; mod utils; -pub use beacon_block::{SignedBlindedBeaconBlock, SubmitBlindedBlockResponse}; -pub use execution_payload::{Transaction, EMPTY_TX_ROOT_HASH}; -pub use get_header::{GetHeaderParams, GetHeaderResponse, SignedExecutionPayloadHeader}; +pub use beacon_block::{PayloadAndBlobs, SignedBlindedBeaconBlock, SubmitBlindedBlockResponse}; +pub use blobs_bundle::{Blob, BlobsBundle}; +pub use execution_payload::{ + ExecutionPayload, ExecutionPayloadHeader, Transaction, Transactions, Withdrawal, + EMPTY_TX_ROOT_HASH, +}; +pub use get_header::{ + ExecutionPayloadHeaderMessage, GetHeaderParams, GetHeaderResponse, SignedExecutionPayloadHeader, +}; +pub use kzg::{ + KzgCommitment, KzgCommitments, KzgProof, KzgProofs, BYTES_PER_COMMITMENT, BYTES_PER_PROOF, +}; pub use spec::{DenebSpec, EthSpec}; pub use utils::{Version, VersionedResponse}; diff --git a/crates/common/src/signer/loader.rs b/crates/common/src/signer/loader.rs index c06f6716..06fd1c3d 100644 --- a/crates/common/src/signer/loader.rs +++ b/crates/common/src/signer/loader.rs @@ -1,11 +1,23 @@ -use std::{fs, path::PathBuf}; +use std::{ + ffi::OsStr, + fs::{self, File}, + io::BufReader, + path::PathBuf, +}; +use aes::{ + cipher::{KeyIvInit, StreamCipher}, + Aes128, +}; use alloy::{primitives::hex::FromHex, rpc::types::beacon::BlsPublicKey}; -use eth2_keystore::Keystore; -use eyre::{eyre, Context}; +use eth2_keystore::{json_keystore::JsonKeystore, Keystore}; +use eyre::{eyre, Context, OptionExt}; +use pbkdf2::{hmac, pbkdf2}; use serde::{de, Deserialize, Deserializer, Serialize}; use tracing::warn; +use unicode_normalization::UnicodeNormalization; +use super::{BlsSigner, EcdsaSigner, PrysmDecryptedKeystore, PrysmKeystore}; use crate::{ config::{load_env_var, SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_ENV, SIGNER_KEYS_ENV}, signer::ConsensusSigner, @@ -21,9 +33,22 @@ pub enum SignerLoader { ValidatorsDir { keys_path: PathBuf, secrets_path: PathBuf, + format: ValidatorKeysFormat, }, } +#[derive(Debug, Serialize, Deserialize, Clone)] +pub enum ValidatorKeysFormat { + #[serde(alias = "lighthouse")] + Lighthouse, + #[serde(alias = "teku")] + Teku, + #[serde(alias = "lodestar")] + Lodestar, + #[serde(alias = "prysm")] + Prysm, +} + impl SignerLoader { pub fn load_keys(self) -> eyre::Result> { self.load_from_env() @@ -43,12 +68,26 @@ impl SignerLoader { .collect::>() .context("failed to load signers")? } - SignerLoader::ValidatorsDir { .. } => { + SignerLoader::ValidatorsDir { keys_path, secrets_path, format } => { // TODO: hacky way to load for now, we should support reading the // definitions.yml file - let keys_path = load_env_var(SIGNER_DIR_KEYS_ENV)?; - let secrets_path = load_env_var(SIGNER_DIR_SECRETS_ENV)?; - load_secrets_and_keys(keys_path, secrets_path).context("failed to load signers")? 
+ let keys_path = load_env_var(SIGNER_DIR_KEYS_ENV).unwrap_or( + keys_path.to_str().ok_or_eyre("Missing signer keys path")?.to_string(), + ); + let secrets_path = load_env_var(SIGNER_DIR_SECRETS_ENV).unwrap_or( + secrets_path.to_str().ok_or_eyre("Missing signer secrets path")?.to_string(), + ); + + return match format { + ValidatorKeysFormat::Lighthouse => { + load_from_lighthouse_format(keys_path, secrets_path) + } + ValidatorKeysFormat::Teku => load_from_teku_format(keys_path, secrets_path), + ValidatorKeysFormat::Lodestar => { + load_from_lodestar_format(keys_path, secrets_path) + } + ValidatorKeysFormat::Prysm => load_from_prysm_format(keys_path, secrets_path), + }; } }) } @@ -72,7 +111,7 @@ impl<'de> Deserialize<'de> for FileKey { } } -fn load_secrets_and_keys( +fn load_from_lighthouse_format( keys_path: String, secrets_path: String, ) -> eyre::Result> { @@ -105,18 +144,174 @@ fn load_secrets_and_keys( Ok(signers) } +fn load_from_teku_format( + keys_path: String, + secrets_path: String, +) -> eyre::Result> { + let entries = fs::read_dir(keys_path.clone())?; + let mut signers = Vec::new(); + + for entry in entries { + let entry = entry?; + let path = entry.path(); + + if path.is_dir() { + warn!("Path {path:?} is a dir"); + continue; + } + + let file_name = path + .file_name() + .and_then(OsStr::to_str) + .ok_or_eyre("File name not valid")? + .rsplit_once(".") + .ok_or_eyre("File doesn't have extension")? + .0; + + match load_one( + format!("{keys_path}/{file_name}.json"), + format!("{secrets_path}/{file_name}.txt"), + ) { + Ok(signer) => signers.push(signer), + Err(e) => warn!("Sign load error: {e}"), + } + } + + Ok(signers) +} + +fn load_from_lodestar_format( + keys_path: String, + password_path: String, +) -> eyre::Result> { + let entries = fs::read_dir(keys_path)?; + let mut signers = Vec::new(); + + for entry in entries { + let entry = entry?; + let path = entry.path(); + + if path.is_dir() { + warn!("Path {path:?} is a dir"); + continue; + } + + let key_path = match path.as_os_str().to_str() { + Some(key_path) => key_path, + None => { + warn!("Path {path:?} cannot be converted to string"); + continue; + } + }; + + match load_one(key_path.to_string(), password_path.clone()) { + Ok(signer) => signers.push(signer), + Err(e) => warn!("Sign load error: {e}"), + } + } + + Ok(signers) +} + +/// Prysm's keystore is a json file with the keys encrypted with a password, +/// among with some metadata to decrypt them. +/// Once decrypted, the keys have the following structure: +/// ```json +/// { +/// "private_keys": [ +/// "sk1_base64_encoded", +/// "sk2_base64_encoded", +/// ... +/// ], +/// "public_keys": [ +/// "pk1_base64_encoded", +/// "pk2_base64_encoded", +/// ... 
+/// ] +/// } +/// ``` +fn load_from_prysm_format( + accounts_path: String, + password_path: String, +) -> eyre::Result> { + let accounts_file = File::open(accounts_path)?; + let accounts_reader = BufReader::new(accounts_file); + let keystore: PrysmKeystore = + serde_json::from_reader(accounts_reader).map_err(|e| eyre!("Failed reading json: {e}"))?; + + let password = fs::read_to_string(password_path)?; + // Normalized as required by EIP-2335 + // (https://eips.ethereum.org/EIPS/eip-2335#password-requirements) + let normalized_password = password + .nfkd() + .collect::() + .bytes() + .filter(|char| (*char > 0x1F && *char < 0x7F) || *char > 0x9F) + .collect::>(); + + let mut decryption_key = [0u8; 32]; + pbkdf2::>( + &normalized_password, + &keystore.salt, + keystore.c, + &mut decryption_key, + )?; + + let ciphertext = keystore.message; + + let mut cipher = ctr::Ctr128BE::::new_from_slices(&decryption_key[..16], &keystore.iv) + .map_err(|_| eyre!("Invalid key or nonce"))?; + + let mut buf = vec![0u8; ciphertext.len()].into_boxed_slice(); + cipher + .apply_keystream_b2b(&ciphertext, &mut buf) + .map_err(|_| eyre!("Failed decrypting accounts"))?; + + let decrypted_keystore: PrysmDecryptedKeystore = + serde_json::from_slice(&buf).map_err(|e| eyre!("Failed reading json: {e}"))?; + let mut signers = Vec::with_capacity(decrypted_keystore.private_keys.len()); + + for key in decrypted_keystore.private_keys { + let signer = ConsensusSigner::new_from_bytes(&key)?; + signers.push(signer); + } + + Ok(signers) +} + fn load_one(ks_path: String, pw_path: String) -> eyre::Result { let keystore = Keystore::from_json_file(ks_path).map_err(|_| eyre!("failed reading json"))?; - let password = fs::read(pw_path)?; + let password = + fs::read(pw_path.clone()).map_err(|e| eyre!("Failed to read password ({pw_path}): {e}"))?; let key = keystore.decrypt_keypair(&password).map_err(|_| eyre!("failed decrypting keypair"))?; ConsensusSigner::new_from_bytes(key.sk.serialize().as_bytes()) } +pub fn load_bls_signer(keys_path: PathBuf, secrets_path: PathBuf) -> eyre::Result { + load_one(keys_path.to_string_lossy().to_string(), secrets_path.to_string_lossy().to_string()) +} + +pub fn load_ecdsa_signer(keys_path: PathBuf, secrets_path: PathBuf) -> eyre::Result { + let key_file = std::fs::File::open(keys_path.to_string_lossy().to_string())?; + let key_reader = std::io::BufReader::new(key_file); + let keystore: JsonKeystore = serde_json::from_reader(key_reader)?; + let password = std::fs::read(secrets_path)?; + let decrypted_password = eth2_keystore::decrypt(&password, &keystore.crypto).unwrap(); + + EcdsaSigner::new_from_bytes(decrypted_password.as_bytes()) +} + #[cfg(test)] mod tests { - use super::FileKey; + use alloy::{hex, primitives::FixedBytes}; + + use super::{load_from_lighthouse_format, load_from_lodestar_format, FileKey}; + use crate::signer::{ + loader::{load_from_prysm_format, load_from_teku_format}, + BlsPublicKey, BlsSigner, + }; #[test] fn test_decode() { @@ -133,4 +328,81 @@ mod tests { assert_eq!(decoded[0].secret_key, s) } + + fn test_correct_load(signers: Vec) { + assert_eq!(signers.len(), 2); + assert!(signers.iter().any(|s| s.pubkey() == BlsPublicKey::from(FixedBytes::new( + hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4") + )))); + assert!(signers.iter().any(|s| s.pubkey() == BlsPublicKey::from(FixedBytes::new( + hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9") + )))); + } + + #[test] + fn 
test_load_lighthouse() { + let result = load_from_lighthouse_format( + "../../tests/data/keystores/keys".into(), + "../../tests/data/keystores/secrets".into(), + ); + + assert!(result.is_ok()); + + test_correct_load(result.unwrap()); + } + + #[test] + fn test_load_teku() { + let result = load_from_teku_format( + "../../tests/data/keystores/teku-keys".into(), + "../../tests/data/keystores/teku-secrets".into(), + ); + + assert!(result.is_ok()); + + test_correct_load(result.unwrap()); + } + + #[test] + fn test_load_prysm() { + let result = load_from_prysm_format( + "../../tests/data/keystores/prysm/direct/accounts/all-accounts.keystore.json".into(), + "../../tests/data/keystores/prysm/empty_pass".into(), + ); + + assert!(result.is_ok()); + + test_correct_load(result.unwrap()); + } + + #[test] + fn test_load_lodestar() { + let result = load_from_lodestar_format( + "../../tests/data/keystores/teku-keys/".into(), + "../../tests/data/keystores/secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4".into() + ); + + assert!(result.is_ok()); + + let signers = result.unwrap(); + + assert_eq!(signers.len(), 1); + assert!(signers[0].pubkey() == BlsPublicKey::from(FixedBytes::new( + hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4") + ))); + + let result = load_from_lodestar_format( + "../../tests/data/keystores/teku-keys/".into(), + "../../tests/data/keystores/secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9".into() + ); + + assert!(result.is_ok()); + + let signers = result.unwrap(); + + assert_eq!(signers.len(), 1); + assert!(signers[0].pubkey() == BlsPublicKey::from(FixedBytes::new( + hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9") + ))); + } } diff --git a/crates/common/src/signer/store.rs b/crates/common/src/signer/store.rs index 5a6e9303..0fcbd96f 100644 --- a/crates/common/src/signer/store.rs +++ b/crates/common/src/signer/store.rs @@ -2,15 +2,31 @@ use std::{ collections::HashMap, fs::{create_dir_all, read_to_string}, io::Write, - path::PathBuf, + path::{Path, PathBuf}, + str::FromStr, }; -use alloy::primitives::Bytes; +use alloy::{ + hex, + primitives::{Bytes, FixedBytes}, + rpc::types::beacon::constants::BLS_SIGNATURE_BYTES_LEN, +}; +use eth2_keystore::{ + default_kdf, + json_keystore::{ + Aes128Ctr, ChecksumModule, Cipher, CipherModule, Crypto, JsonKeystore, KdfModule, + Sha256Checksum, + }, + Uuid, IV_SIZE, SALT_SIZE, +}; +use rand::Rng; use serde::{Deserialize, Serialize}; +use tracing::warn; +use super::{load_bls_signer, load_ecdsa_signer}; use crate::{ - commit::request::{PublicKey, SignedProxyDelegation}, - config::{load_env_var, PROXY_DIR_ENV}, + commit::request::{EncryptionScheme, ProxyDelegation, PublicKey, SignedProxyDelegation}, + config::{load_env_var, PROXY_DIR_ENV, PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_ENV}, signer::{ BlsProxySigner, BlsPublicKey, BlsSigner, EcdsaProxySigner, EcdsaPublicKey, EcdsaSigner, ProxySigners, @@ -28,7 +44,13 @@ struct KeyAndDelegation { #[serde(untagged)] pub enum ProxyStore { /// Stores private keys in plaintext to a file, do not use in prod - File { proxy_dir: PathBuf }, + File { + proxy_dir: PathBuf, + }, + ERC2335 { + keys_path: PathBuf, + secrets_path: PathBuf, + }, } impl ProxyStore { @@ -38,6 +60,12 @@ impl ProxyStore { let path = load_env_var(PROXY_DIR_ENV)?; ProxyStore::File { proxy_dir: PathBuf::from(path) } } + 
ProxyStore::ERC2335 { .. } => { + let keys_path = PathBuf::from_str(&load_env_var(PROXY_DIR_KEYS_ENV)?)?; + let secrets_path = PathBuf::from_str(&load_env_var(PROXY_DIR_SECRETS_ENV)?)?; + + ProxyStore::ERC2335 { keys_path, secrets_path } + } }) } @@ -63,6 +91,16 @@ impl ProxyStore { let mut file = std::fs::File::create(file_path)?; file.write_all(content.as_ref())?; } + ProxyStore::ERC2335 { keys_path, secrets_path } => { + store_erc2335_key( + module_id, + proxy.delegation, + proxy.secret().to_vec(), + keys_path, + secrets_path, + EncryptionScheme::Bls, + )?; + } } Ok(()) @@ -90,6 +128,16 @@ impl ProxyStore { let mut file = std::fs::File::create(file_path)?; file.write_all(content.as_ref())?; } + ProxyStore::ERC2335 { keys_path, secrets_path } => { + store_erc2335_key( + module_id, + proxy.delegation, + proxy.secret(), + keys_path, + secrets_path, + EncryptionScheme::Ecdsa, + )?; + } } Ok(()) @@ -183,6 +231,430 @@ impl ProxyStore { Ok((proxy_signers, bls_map, ecdsa_map)) } + ProxyStore::ERC2335 { keys_path, secrets_path } => { + let mut proxy_signers = ProxySigners::default(); + let mut bls_map: HashMap> = HashMap::new(); + let mut ecdsa_map: HashMap> = HashMap::new(); + + for entry in std::fs::read_dir(keys_path)? { + let entry = entry?; + let consensus_key_path = entry.path(); + let consensus_pubkey = + match FixedBytes::from_str(&entry.file_name().to_string_lossy()) { + Ok(bytes) => BlsPublicKey::from(bytes), + Err(e) => { + warn!("Failed to parse consensus pubkey: {e}"); + continue; + } + }; + + if !consensus_key_path.is_dir() { + warn!("{consensus_key_path:?} is not a directory"); + continue; + } + + for entry in std::fs::read_dir(&consensus_key_path)? { + let entry = entry?; + let module_path = entry.path(); + let module_id = entry.file_name().to_string_lossy().to_string(); + + if !module_path.is_dir() { + warn!("{module_path:?} is not a directory"); + continue; + } + + let bls_path = module_path.join("bls"); + if let Ok(bls_keys) = std::fs::read_dir(&bls_path) { + for entry in bls_keys { + let entry = entry?; + let path = entry.path(); + + if !path.is_file() || + !path.extension().is_some_and(|ext| ext == "json") + { + continue; + } + + let name = entry.file_name().to_string_lossy().to_string(); + let name = name.trim_end_matches(".json"); + + let signer = load_bls_signer( + path, + secrets_path + .join(consensus_pubkey.to_string()) + .join(&module_id) + .join("bls") + .join(name), + ) + .map_err(|e| eyre::eyre!("Error loading BLS signer: {e}"))?; + + let delegation_signature = match std::fs::read_to_string( + bls_path.join(format!("{name}.sig")), + ) { + Ok(sig) => { + FixedBytes::::from_str(&sig)? 
+ } + Err(e) => { + warn!("Failed to read delegation signature: {e}"); + continue; + } + }; + + let proxy_signer = BlsProxySigner { + signer: signer.clone(), + delegation: SignedProxyDelegation { + message: ProxyDelegation { + delegator: consensus_pubkey, + proxy: signer.pubkey(), + }, + signature: delegation_signature, + }, + }; + + proxy_signers.bls_signers.insert(signer.pubkey(), proxy_signer); + bls_map + .entry(ModuleId(module_id.clone())) + .or_default() + .push(signer.pubkey()); + } + } + + let ecdsa_path = module_path.join("ecdsa"); + if let Ok(ecdsa_keys) = std::fs::read_dir(&ecdsa_path) { + for entry in ecdsa_keys { + let entry = entry?; + let path = entry.path(); + + if !path.is_file() || + !path.extension().is_some_and(|ext| ext == "json") + { + continue; + } + + let name = entry.file_name().to_string_lossy().to_string(); + let name = name.trim_end_matches(".json"); + + let signer = load_ecdsa_signer( + path, + secrets_path + .join(format!("{consensus_pubkey:#x}")) + .join(&module_id) + .join("ecdsa") + .join(name), + )?; + let delegation_signature = match std::fs::read_to_string( + ecdsa_path.join(format!("{name}.sig")), + ) { + Ok(sig) => { + FixedBytes::::from_str(&sig)? + } + Err(e) => { + warn!("Failed to read delegation signature: {e}",); + continue; + } + }; + + let proxy_signer = EcdsaProxySigner { + signer: signer.clone(), + delegation: SignedProxyDelegation { + message: ProxyDelegation { + delegator: consensus_pubkey, + proxy: signer.pubkey(), + }, + signature: delegation_signature, + }, + }; + + proxy_signers.ecdsa_signers.insert(signer.pubkey(), proxy_signer); + ecdsa_map + .entry(ModuleId(module_id.clone())) + .or_default() + .push(signer.pubkey()); + } + } + } + } + Ok((proxy_signers, bls_map, ecdsa_map)) + } } } } + +fn store_erc2335_key( + module_id: &ModuleId, + delegation: SignedProxyDelegation, + secret: Vec, + keys_path: &Path, + secrets_path: &Path, + scheme: EncryptionScheme, +) -> eyre::Result<()> { + let proxy_pubkey = delegation.message.proxy; + + let password_bytes: [u8; 32] = rand::thread_rng().gen(); + let password = hex::encode(password_bytes); + + let pass_path = secrets_path + .join(delegation.message.delegator.to_string()) + .join(&module_id.0) + .join(scheme.to_string()); + std::fs::create_dir_all(&pass_path)?; + let pass_path = pass_path.join(proxy_pubkey.to_string()); + let mut pass_file = std::fs::File::create(&pass_path)?; + pass_file.write_all(password.as_bytes())?; + + let sig_path = keys_path + .join(delegation.message.delegator.to_string()) + .join(&module_id.0) + .join(scheme.to_string()); + std::fs::create_dir_all(&sig_path)?; + let sig_path = sig_path.join(format!("{}.sig", proxy_pubkey)); + + let mut sig_file = std::fs::File::create(sig_path)?; + sig_file.write_all(delegation.signature.to_string().as_bytes())?; + + let salt: [u8; SALT_SIZE] = rand::thread_rng().gen(); + let iv: [u8; IV_SIZE] = rand::thread_rng().gen(); + let kdf = default_kdf(salt.to_vec()); + let cipher = Cipher::Aes128Ctr(Aes128Ctr { iv: iv.to_vec().into() }); + let (cipher_text, checksum) = + eth2_keystore::encrypt(&secret, password.as_bytes(), &kdf, &cipher) + .map_err(|_| eyre::eyre!("Error encrypting key"))?; + + let keystore = JsonKeystore { + crypto: Crypto { + kdf: KdfModule { + function: kdf.function(), + params: kdf, + message: eth2_keystore::json_keystore::EmptyString, + }, + checksum: ChecksumModule { + function: Sha256Checksum::function(), + params: eth2_keystore::json_keystore::EmptyMap, + message: checksum.to_vec().into(), + }, + cipher: CipherModule 
{ + function: cipher.function(), + params: cipher, + message: cipher_text.into(), + }, + }, + uuid: Uuid::new_v4(), + path: None, + pubkey: format!("{:x}", delegation.message.proxy), + version: eth2_keystore::json_keystore::Version::V4, + description: Some(delegation.message.proxy.to_string()), + name: None, + }; + + let json_path = keys_path + .join(delegation.message.delegator.to_string()) + .join(&module_id.0) + .join(scheme.to_string()); + std::fs::create_dir_all(&json_path)?; + let json_path = json_path.join(format!("{}.json", proxy_pubkey)); + let mut json_file = std::fs::File::create(&json_path)?; + json_file.write_all(serde_json::to_string(&keystore)?.as_bytes())?; + + Ok(()) +} + +#[cfg(test)] +mod test { + use hex::FromHex; + use tree_hash::TreeHash; + + use super::*; + use crate::{ + commit::request::{ProxyDelegationBls, SignedProxyDelegationBls}, + signer::ConsensusSigner, + types::Chain, + }; + + #[tokio::test] + async fn test_erc2335_storage_format() { + let tmp_path = match std::env::var("CB_TESTS_DIR") { + Ok(dir) => PathBuf::from(dir), + Err(_) => std::env::temp_dir(), + } + .join("test_erc2335_storage_format"); + let keys_path = tmp_path.join("keys"); + let secrets_path = tmp_path.join("secrets"); + let store = ProxyStore::ERC2335 { + keys_path: keys_path.clone(), + secrets_path: secrets_path.clone(), + }; + + let module_id = ModuleId("TEST_MODULE".to_string()); + let consensus_signer = ConsensusSigner::new_from_bytes(&hex!( + "0088e364a5396a81b50febbdc8784663fb9089b5e67cbdc173991a00c587673f" + )) + .unwrap(); + let proxy_signer = BlsSigner::new_from_bytes(&hex!( + "13000f8b3d7747e7754022720d33d5b506490429f3d593162f00e254f97d2940" + )) + .unwrap(); + + let message = ProxyDelegationBls { + delegator: consensus_signer.pubkey(), + proxy: proxy_signer.pubkey(), + }; + let signature = consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0).await; + let delegation = SignedProxyDelegationBls { signature, message }; + let proxy_signer = BlsProxySigner { signer: proxy_signer, delegation }; + + store.store_proxy_bls(&module_id, &proxy_signer).unwrap(); + + let json_path = keys_path + .join(consensus_signer.pubkey().to_string()) + .join("TEST_MODULE") + .join("bls") + .join(format!("{}.json", proxy_signer.pubkey().to_string())); + let sig_path = keys_path + .join(consensus_signer.pubkey().to_string()) + .join("TEST_MODULE") + .join("bls") + .join(format!("{}.sig", proxy_signer.pubkey().to_string())); + let pass_path = secrets_path + .join(consensus_signer.pubkey().to_string()) + .join("TEST_MODULE") + .join("bls") + .join(proxy_signer.pubkey().to_string()); + + let keystore: JsonKeystore = + serde_json::de::from_str(&std::fs::read_to_string(json_path).unwrap()).unwrap(); + + assert_eq!(keystore.pubkey, proxy_signer.pubkey().to_string().trim_start_matches("0x")); + + let sig = FixedBytes::from_hex(std::fs::read_to_string(sig_path).unwrap()); + assert!(sig.is_ok()); + assert_eq!(sig.unwrap(), signature); + + assert!(FixedBytes::<32>::from_hex(std::fs::read_to_string(pass_path).unwrap()).is_ok()); + } + + #[test] + fn test_erc2335_load() { + let keys_path = Path::new("../../tests/data/proxy/keys").to_path_buf(); + let secrets_path = Path::new("../../tests/data/proxy/secrets").to_path_buf(); + let store = ProxyStore::ERC2335 { + keys_path: keys_path.clone(), + secrets_path: secrets_path.clone(), + }; + + let (proxy_signers, bls_keys, ecdsa_keys) = store.load_proxies().unwrap(); + assert_eq!(bls_keys.len(), 1); + assert_eq!(ecdsa_keys.len(), 0); + 
assert_eq!(proxy_signers.bls_signers.len(), 1); + assert_eq!(proxy_signers.ecdsa_signers.len(), 0); + + let proxy_key = BlsPublicKey::from( + FixedBytes::from_hex( + "a77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba" + ).unwrap() + ); + let consensus_key = BlsPublicKey::from( + FixedBytes::from_hex( + "ac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118" + ).unwrap() + ); + + let proxy_signer = proxy_signers.bls_signers.get(&proxy_key); + + assert!(proxy_signer.is_some()); + let proxy_signer = proxy_signer.unwrap(); + + assert_eq!( + proxy_signer.delegation.signature, + FixedBytes::from_hex( + std::fs::read_to_string( + keys_path + .join(consensus_key.to_string()) + .join("TEST_MODULE") + .join("bls") + .join(format!("{proxy_key}.sig")) + ) + .unwrap() + ) + .unwrap() + ); + assert_eq!(proxy_signer.delegation.message.delegator, consensus_key); + assert_eq!(proxy_signer.delegation.message.proxy, proxy_key); + + assert!(bls_keys + .get(&ModuleId("TEST_MODULE".into())) + .is_some_and(|keys| keys.contains(&proxy_key))); + } + + #[tokio::test] + async fn test_erc2335_store_and_load() { + let tmp_path = match std::env::var("CB_TESTS_DIR") { + Ok(dir) => PathBuf::from(dir), + Err(_) => std::env::temp_dir(), + } + .join("test_erc2335_store_and_load"); + let keys_path = tmp_path.join("keys"); + let secrets_path = tmp_path.join("secrets"); + let store = ProxyStore::ERC2335 { + keys_path: keys_path.clone(), + secrets_path: secrets_path.clone(), + }; + + let module_id = ModuleId("TEST_MODULE".to_string()); + let consensus_signer = ConsensusSigner::new_from_bytes(&hex!( + "0088e364a5396a81b50febbdc8784663fb9089b5e67cbdc173991a00c587673f" + )) + .unwrap(); + let proxy_signer = BlsSigner::new_from_bytes(&hex!( + "13000f8b3d7747e7754022720d33d5b506490429f3d593162f00e254f97d2940" + )) + .unwrap(); + + let message = ProxyDelegationBls { + delegator: consensus_signer.pubkey(), + proxy: proxy_signer.pubkey(), + }; + let signature = consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0).await; + let delegation = SignedProxyDelegationBls { signature, message }; + let proxy_signer = BlsProxySigner { signer: proxy_signer, delegation }; + + store.store_proxy_bls(&module_id, &proxy_signer).unwrap(); + + let load_result = store.load_proxies(); + assert!(load_result.is_ok()); + + let (proxy_signers, bls_keys, ecdsa_keys) = load_result.unwrap(); + + assert_eq!(bls_keys.len(), 1); + assert_eq!(ecdsa_keys.len(), 0); + assert_eq!(proxy_signers.bls_signers.len(), 1); + assert_eq!(proxy_signers.ecdsa_signers.len(), 0); + + let loaded_proxy_signer = proxy_signers.bls_signers.get(&proxy_signer.pubkey()); + + assert!(loaded_proxy_signer.is_some()); + let loaded_proxy_signer = loaded_proxy_signer.unwrap(); + + assert_eq!( + loaded_proxy_signer.delegation.signature, + FixedBytes::from_hex( + std::fs::read_to_string( + keys_path + .join(consensus_signer.pubkey().to_string()) + .join("TEST_MODULE") + .join("bls") + .join(format!("{}.sig", proxy_signer.pubkey().to_string())) + ) + .unwrap() + ) + .unwrap() + ); + assert_eq!(loaded_proxy_signer.delegation.message.delegator, consensus_signer.pubkey()); + assert_eq!(loaded_proxy_signer.delegation.message.proxy, proxy_signer.pubkey()); + + assert!(bls_keys + .get(&ModuleId("TEST_MODULE".into())) + .is_some_and(|keys| keys.contains(&proxy_signer.pubkey()))); + + std::fs::remove_dir_all(tmp_path).unwrap(); + } +} diff --git a/crates/common/src/signer/types.rs 
b/crates/common/src/signer/types.rs index bb1bfc9e..4071f858 100644 --- a/crates/common/src/signer/types.rs +++ b/crates/common/src/signer/types.rs @@ -1,6 +1,12 @@ use std::collections::HashMap; +use alloy::primitives::Bytes; +use base64::{prelude::BASE64_STANDARD, Engine}; use derive_more::derive::Deref; +use serde::{ + de::{Error as DeError, Unexpected}, + Deserialize, Deserializer, +}; use super::{BlsPublicKey, EcdsaPublicKey, EcdsaSigner}; use crate::{ @@ -33,3 +39,86 @@ pub struct ProxySigners { pub bls_signers: HashMap, pub ecdsa_signers: HashMap, } + +// Prysm keystore actually has a more complex structure, but we only need +// this subset of fields +pub struct PrysmKeystore { + pub message: Bytes, + pub salt: Bytes, + pub c: u32, + pub iv: Bytes, +} + +#[derive(Deserialize, Debug)] +pub struct PrysmDecryptedKeystore { + #[serde(deserialize_with = "base64_list_decode")] + pub private_keys: Vec, + #[serde(deserialize_with = "base64_list_decode")] + pub public_keys: Vec, +} + +fn base64_list_decode<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let list: Vec<&str> = Deserialize::deserialize(deserializer)?; + let mut decoded_list = Vec::with_capacity(list.len()); + + for encoded_key in list.iter() { + decoded_list.push( + BASE64_STANDARD + .decode(encoded_key) + .map_err(|_| DeError::invalid_type(Unexpected::Other("unknown"), &"base64 string"))? + .into(), + ); + } + + Ok(decoded_list) +} + +// impl serde deserialize for PrysmKeystore: +impl<'de> Deserialize<'de> for PrysmKeystore { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let value: serde_json::Value = Deserialize::deserialize(deserializer)?; + let crypto = value.get("crypto").ok_or(DeError::missing_field("crypto"))?; + let cipher = crypto.get("cipher").ok_or(DeError::missing_field("crypto.cipher"))?; + let kdf_params = crypto + .get("kdf") + .ok_or(DeError::missing_field("kdf"))? + .get("params") + .ok_or(DeError::missing_field("kdf.params"))?; + + Ok(PrysmKeystore { + message: serde_json::from_value( + cipher + .get("message") + .ok_or(DeError::missing_field("crypto.cipher.message"))? + .clone(), + ) + .map_err(|_| DeError::invalid_type(Unexpected::Other("unknown"), &"bytes"))?, + salt: serde_json::from_value( + kdf_params + .get("salt") + .ok_or(DeError::missing_field("crypto.kdf.params.salt"))? + .clone(), + ) + .map_err(|_| DeError::invalid_type(Unexpected::Other("unknown"), &"bytes"))?, + c: serde_json::from_value( + kdf_params.get("c").ok_or(DeError::missing_field("crypto.kdf.params.c"))?.clone(), + ) + .map_err(|_| DeError::invalid_type(Unexpected::Other("unknown"), &"u32"))?, + iv: serde_json::from_value( + cipher + .get("params") + .ok_or(DeError::missing_field("crypto.cipher.params"))? + .get("iv") + .ok_or(DeError::missing_field("crypto.cipher.params.iv"))? 
+ .clone(), + ) + .map_err(|_| DeError::invalid_type(Unexpected::Other("unknown"), &"bytes"))?, + }) + } +} diff --git a/crates/common/src/types.rs b/crates/common/src/types.rs index c49323a6..c7684520 100644 --- a/crates/common/src/types.rs +++ b/crates/common/src/types.rs @@ -23,9 +23,11 @@ pub enum Chain { Holesky, Sepolia, Helder, - Custom { genesis_time_secs: u64, slot_time_secs: u64, genesis_fork_version: [u8; 4] }, + Custom { genesis_time_secs: u64, slot_time_secs: u64, genesis_fork_version: ForkVersion }, } +pub type ForkVersion = [u8; 4]; + impl std::fmt::Debug for Chain { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -54,7 +56,7 @@ impl Chain { } } - pub fn genesis_fork_version(&self) -> [u8; 4] { + pub fn genesis_fork_version(&self) -> ForkVersion { match self { Chain::Mainnet => KnownChain::Mainnet.genesis_fork_version(), Chain::Holesky => KnownChain::Holesky.genesis_fork_version(), @@ -120,7 +122,7 @@ impl KnownChain { } } - pub fn genesis_fork_version(&self) -> [u8; 4] { + pub fn genesis_fork_version(&self) -> ForkVersion { match self { KnownChain::Mainnet => hex!("00000000"), KnownChain::Holesky => hex!("01017000"), @@ -163,8 +165,19 @@ impl From for Chain { #[serde(untagged)] pub enum ChainLoader { Known(KnownChain), - Path(PathBuf), - Custom { genesis_time_secs: u64, slot_time_secs: u64, genesis_fork_version: Bytes }, + Path { + /// Genesis time as returned in /eth/v1/beacon/genesis + genesis_time_secs: u64, + /// Path to the genesis spec, as returned by /eth/v1/config/spec + /// either in JSON or YAML format + path: PathBuf, + }, + Custom { + /// Genesis time as returned in /eth/v1/beacon/genesis + genesis_time_secs: u64, + slot_time_secs: u64, + genesis_fork_version: Bytes, + }, } impl Serialize for Chain { @@ -199,9 +212,13 @@ impl<'de> Deserialize<'de> for Chain { match loader { ChainLoader::Known(known) => Ok(Chain::from(known)), - ChainLoader::Path(path) => load_chain_from_file(path).map_err(serde::de::Error::custom), + ChainLoader::Path { genesis_time_secs, path } => { + let (slot_time_secs, genesis_fork_version) = + load_chain_from_file(path).map_err(serde::de::Error::custom)?; + Ok(Chain::Custom { genesis_time_secs, slot_time_secs, genesis_fork_version }) + } ChainLoader::Custom { genesis_time_secs, slot_time_secs, genesis_fork_version } => { - let genesis_fork_version: [u8; 4] = + let genesis_fork_version: ForkVersion = genesis_fork_version.as_ref().try_into().map_err(serde::de::Error::custom)?; Ok(Chain::Custom { genesis_time_secs, slot_time_secs, genesis_fork_version }) } @@ -209,38 +226,26 @@ impl<'de> Deserialize<'de> for Chain { } } -/// Load a chain config from a spec file, such as returned by -/// /eth/v1/config/spec ref: https://ethereum.github.io/beacon-APIs/#/Config/getSpec +/// Returns seconds_per_slot and genesis_fork_version from a spec, such as +/// returned by /eth/v1/config/spec ref: https://ethereum.github.io/beacon-APIs/#/Config/getSpec /// Try to load two formats: /// - JSON as return the getSpec endpoint, either with or without the `data` /// field /// - YAML as used e.g. 
in Kurtosis/Ethereum Package -pub fn load_chain_from_file(path: PathBuf) -> eyre::Result { +pub fn load_chain_from_file(path: PathBuf) -> eyre::Result<(u64, ForkVersion)> { #[derive(Deserialize)] #[serde(rename_all = "UPPERCASE")] struct QuotedSpecFile { - #[serde(with = "serde_utils::quoted_u64")] - min_genesis_time: u64, - #[serde(with = "serde_utils::quoted_u64")] - genesis_delay: u64, #[serde(with = "serde_utils::quoted_u64")] seconds_per_slot: u64, genesis_fork_version: Bytes, } impl QuotedSpecFile { - fn to_chain(&self) -> eyre::Result { - let genesis_fork_version: [u8; 4] = self.genesis_fork_version.as_ref().try_into()?; - - Ok(Chain::Custom { - // note that this can be wrong, (e.g. it's wrong in mainnet). The correct - // value should come from /eth/v1/beacon/genesis - // more info here: https://kb.beaconcha.in/ethereum-staking/the-genesis-event - // FIXME - genesis_time_secs: self.min_genesis_time + self.genesis_delay, - slot_time_secs: self.seconds_per_slot, - genesis_fork_version, - }) + fn to_chain(&self) -> eyre::Result<(u64, ForkVersion)> { + let genesis_fork_version: ForkVersion = + self.genesis_fork_version.as_ref().try_into()?; + Ok((self.seconds_per_slot, genesis_fork_version)) } } @@ -252,21 +257,14 @@ pub fn load_chain_from_file(path: PathBuf) -> eyre::Result { #[derive(Deserialize)] #[serde(rename_all = "UPPERCASE")] struct SpecFile { - min_genesis_time: u64, - genesis_delay: u64, seconds_per_slot: u64, genesis_fork_version: u32, } impl SpecFile { - fn to_chain(&self) -> Chain { - let genesis_fork_version: [u8; 4] = self.genesis_fork_version.to_be_bytes(); - - Chain::Custom { - genesis_time_secs: self.min_genesis_time + self.genesis_delay, - slot_time_secs: self.seconds_per_slot, - genesis_fork_version, - } + fn to_chain(&self) -> (u64, ForkVersion) { + let genesis_fork_version: ForkVersion = self.genesis_fork_version.to_be_bytes(); + (self.seconds_per_slot, genesis_fork_version) } } @@ -320,11 +318,11 @@ mod tests { path.pop(); path.push("tests/data/mainnet_spec_data.json"); - let s = format!("chain = {path:?}"); + let s = format!("chain = {{ genesis_time_secs = 1, path = {path:?}}}"); let decoded: MockConfig = toml::from_str(&s).unwrap(); - // see fixme in load_chain_from_file + assert_eq!(decoded.chain.genesis_time_sec(), 1); assert_eq!(decoded.chain.slot_time_sec(), KnownChain::Mainnet.slot_time_sec()); assert_eq!( decoded.chain.genesis_fork_version(), @@ -341,11 +339,11 @@ mod tests { path.pop(); path.push("tests/data/holesky_spec.json"); - let s = format!("chain = {path:?}"); + let s = format!("chain = {{ genesis_time_secs = 1, path = {path:?}}}"); let decoded: MockConfig = toml::from_str(&s).unwrap(); assert_eq!(decoded.chain, Chain::Custom { - genesis_time_secs: KnownChain::Holesky.genesis_time_sec(), + genesis_time_secs: 1, slot_time_secs: KnownChain::Holesky.slot_time_sec(), genesis_fork_version: KnownChain::Holesky.genesis_fork_version() }) @@ -360,11 +358,11 @@ mod tests { path.pop(); path.push("tests/data/sepolia_spec_data.json"); - let s = format!("chain = {path:?}"); + let s = format!("chain = {{ genesis_time_secs = 1, path = {path:?}}}"); let decoded: MockConfig = toml::from_str(&s).unwrap(); assert_eq!(decoded.chain, Chain::Custom { - genesis_time_secs: KnownChain::Sepolia.genesis_time_sec(), + genesis_time_secs: 1, slot_time_secs: KnownChain::Sepolia.slot_time_sec(), genesis_fork_version: KnownChain::Sepolia.genesis_fork_version() }) @@ -379,11 +377,11 @@ mod tests { path.pop(); path.push("tests/data/helder_spec.yml"); - let s = format!("chain = 
{path:?}"); + let s = format!("chain = {{ genesis_time_secs = 1, path = {path:?}}}"); let decoded: MockConfig = toml::from_str(&s).unwrap(); assert_eq!(decoded.chain, Chain::Custom { - genesis_time_secs: KnownChain::Helder.genesis_time_sec(), + genesis_time_secs: 1, slot_time_secs: KnownChain::Helder.slot_time_sec(), genesis_fork_version: KnownChain::Helder.genesis_fork_version() }) diff --git a/crates/pbs/src/mev_boost/get_header.rs b/crates/pbs/src/mev_boost/get_header.rs index b13a4e0c..04485119 100644 --- a/crates/pbs/src/mev_boost/get_header.rs +++ b/crates/pbs/src/mev_boost/get_header.rs @@ -51,15 +51,22 @@ pub async fn get_header( } let ms_into_slot = ms_into_slot(params.slot, state.config.chain); - let max_timeout_ms = state - .pbs_config() + let (pbs_config, relays, maybe_mux_id) = state.mux_config_and_relays(¶ms.pubkey); + + if let Some(mux_id) = maybe_mux_id { + debug!(mux_id, relays = relays.len(), pubkey = %params.pubkey, "using mux config"); + } else { + debug!(relays = relays.len(), pubkey = %params.pubkey, "using default config"); + } + + let max_timeout_ms = pbs_config .timeout_get_header_ms - .min(state.pbs_config().late_in_slot_time_ms.saturating_sub(ms_into_slot)); + .min(pbs_config.late_in_slot_time_ms.saturating_sub(ms_into_slot)); if max_timeout_ms == 0 { warn!( ms_into_slot, - threshold = state.pbs_config().late_in_slot_time_ms, + threshold = pbs_config.late_in_slot_time_ms, "late in slot, skipping relay requests" ); @@ -73,7 +80,6 @@ pub async fn get_header( send_headers.insert(HEADER_SLOT_UUID_KEY, HeaderValue::from_str(&slot_uuid.to_string())?); send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - let relays = state.relays(); let mut handles = Vec::with_capacity(relays.len()); for relay in relays.iter() { handles.push(send_timed_get_header( diff --git a/crates/pbs/src/state.rs b/crates/pbs/src/state.rs index eb910f0a..3defe1c7 100644 --- a/crates/pbs/src/state.rs +++ b/crates/pbs/src/state.rs @@ -82,6 +82,18 @@ where pub fn relays(&self) -> &[RelayClient] { &self.config.relays } + /// Returns the PBS config and relay clients for the given validator pubkey. + /// If the pubkey is not found in any mux, the default configs are + /// returned + pub fn mux_config_and_relays( + &self, + pubkey: &BlsPublicKey, + ) -> (&PbsConfig, &[RelayClient], Option<&str>) { + match self.config.muxes.as_ref().and_then(|muxes| muxes.get(pubkey)) { + Some(mux) => (&mux.config, mux.relays.as_slice(), Some(&mux.id)), + None => (self.pbs_config(), self.relays(), None), + } + } pub fn has_monitors(&self) -> bool { !self.config.pbs_config.relay_monitors.is_empty() diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 0fd55179..5267da25 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -29,12 +29,206 @@ After the sidecar is started, it will expose a port (`18550` in this example), t Note that in this setup, the signer module will not be started. +## Signer Module + +To start the signer module, you need to include its parameters in the config file: + +```toml +[signer] +[signer.loader] +format = "lighthouse" +keys_path = "/path/to/keys" +secrets_path = "/path/to.secrets" +``` + +We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's easier to load the keys. We're working on adding support for additional keystores, including remote signers. These are the expected file structures for each format: + +
+ Lighthouse + + #### File structure: + ``` + ├── keys + │   ├── <VALIDATOR_PUBKEY_1> + │   │   └── voting-keystore.json + │   └── <VALIDATOR_PUBKEY_2> + │       └── voting-keystore.json + └── secrets +    ├── <VALIDATOR_PUBKEY_1> +    └── <VALIDATOR_PUBKEY_2> + ``` + + #### Config: + ```toml + [signer] + [signer.loader] + format = "lighthouse" + keys_path = "keys" + secrets_path = "secrets" + ``` +
+ +
+ Prysm + + #### File structure: + ``` + ├── wallet + │   └── direct + │      └── accounts + │         └── all-accounts.keystore.json + └── secrets +    └── password.txt + ``` + + #### Config: + ```toml + [signer] + [signer.loader] + format = "prysm" + keys_path = "wallet/direct/accounts/all-accounts.keystore.json" + secrets_path = "secrets/password.txt" + ``` +
+ +
+ Teku + + #### File structure: + ``` + ├── keys + │   ├── <VALIDATOR_PUBKEY_1>.json + │   └── <VALIDATOR_PUBKEY_2>.json + └── secrets +    ├── <VALIDATOR_PUBKEY_1>.txt +    └── <VALIDATOR_PUBKEY_2>.txt + ``` + + #### Config: + ```toml + [signer] + [signer.loader] + format = "teku" + keys_path = "keys" + secrets_path = "secrets" + ``` +
+ +
+ Lodestar + + #### File structure: + ``` + ├── keys + │   ├── <VALIDATOR_PUBKEY_1>.json + │   └── <VALIDATOR_PUBKEY_2>.json + └── secrets +    └── password.txt + ``` + + #### Config: + ```toml + [signer] + [signer.loader] + format = "lodestar" + keys_path = "keys" + secrets_path = "secrets/password.txt" + ``` + + :::note + All keys have the same password stored in `secrets/password.txt` + ::: +
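Of the formats above, Prysm is the only one that bundles every key into a single encrypted file (`all-accounts.keystore.json`). The `PrysmKeystore` struct added in `crates/common/src/signer/types.rs` earlier in this diff extracts exactly the fields needed to open it: the ciphertext (`message`), the PBKDF2 `salt` and iteration count `c`, and the AES-CTR `iv`. As a rough sketch of how those fields fit together, assuming the `pbkdf2` / `aes-128-ctr`, EIP-2335-style scheme shown in the test fixture `tests/data/keystores/prysm/direct/accounts/all-accounts.keystore.json` (the function below is illustrative, not the loader code in this PR):

```rust
use aes::Aes128;
use cipher::{KeyIvInit, StreamCipher};
use ctr::Ctr128BE;
use pbkdf2::pbkdf2_hmac;
use sha2::Sha256;
use unicode_normalization::UnicodeNormalization;

/// Illustrative only: decrypt the Prysm `all-accounts.keystore.json` payload.
/// `message`, `salt`, `c` and `iv` map to the fields of `PrysmKeystore`.
fn decrypt_prysm_wallet(
    password: &str,
    message: &[u8],
    salt: &[u8],
    c: u32,
    iv: &[u8],
) -> eyre::Result<Vec<u8>> {
    // EIP-2335 keystores normalize the password with NFKD before key derivation
    let password: String = password.nfkd().collect();

    // Derive 32 bytes with PBKDF2-HMAC-SHA256 (`c` iterations); the AES key is the
    // first half, the second half is used for the checksum (omitted here)
    let mut dk = [0u8; 32];
    pbkdf2_hmac::<Sha256>(password.as_bytes(), salt, c, &mut dk);

    // AES-128-CTR is symmetric: applying the keystream decrypts the ciphertext in place
    let mut cipher = Ctr128BE::<Aes128>::new_from_slices(&dk[..16], iv)
        .map_err(|_| eyre::eyre!("invalid key or iv length"))?;
    let mut plaintext = message.to_vec();
    cipher.apply_keystream(&mut plaintext);

    Ok(plaintext)
}
```

The decrypted bytes are the JSON that `PrysmDecryptedKeystore` deserializes, with the private and public keys base64-encoded; a real loader would also verify the keystore checksum (SHA-256 over the second half of the derived key and the ciphertext) before trusting the result.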
+ +### Proxy keys store + +Proxy keys can be used to sign transactions with a different key than the one used to sign the block. Proxy keys are generated by the Signer module and authorized by the validator key. Each module has its own proxy keys, which can be BLS or ECDSA. + +To persist proxy keys across restarts, you must enable the proxy store in the config file. There are two options for this: + +
+ File + + The keys are stored in plain text in a file. This method is unsafe and should only be used for testing. + + #### File structure + + ``` + + └── + └── bls + ├── + └── + ``` + + #### Configuration + + ```toml + [signer.store] + proxy_dir = "path/to/proxy_dir" + ``` + + Where each `` file contains the following: + ```json + { + "secret": "0x...", + "delegation": { + "message": { + "delegator": "0x...", + "proxy": "0x..." + }, + "signature": "0x..." + } + } + ``` +
+ +
+ ERC2335 + + The keys are stored in an ERC-2335 style keystore, along with a password. This way, the keys directory can be shared safely: without the passwords, the keystores are useless. + + #### File structure + + ``` + ├── <KEYS_DIR> + │   └── <DELEGATOR_PUBKEY> + │       └── <MODULE_ID> + │           ├── bls/ + │           │   ├── <PROXY_PUBKEY_1>.json + │           │   ├── <PROXY_PUBKEY_1>.sig + │           │   ├── <PROXY_PUBKEY_2>.json + │           │   └── <PROXY_PUBKEY_2>.sig + │           └── ecdsa/ + │               ├── <PROXY_PUBKEY_3>.json + │               └── <PROXY_PUBKEY_3>.sig + └── <SECRETS_DIR> +     └── <DELEGATOR_PUBKEY> +         └── <MODULE_ID> +             ├── bls/ +             │   ├── <PROXY_PUBKEY_1> +             │   └── <PROXY_PUBKEY_2> +             └── ecdsa/ +                 └── <PROXY_PUBKEY_3> + ``` + + #### Configuration + + ```toml + [signer.store] + keys_path = "path/to/keys" + secrets_path = "path/to/secrets" + ``` + + Where the `<PROXY_PUBKEY>.json` files contain an ERC-2335 keystore, the `<PROXY_PUBKEY>.sig` files contain the signature of the delegation, and the `<PROXY_PUBKEY>` files under the secrets directory contain the password to decrypt the corresponding keystore. +
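To make the layout concrete: the password stored under the secrets directory is what unlocks the matching keystore under the keys directory. A minimal sketch of reading one proxy key back, assuming the `eth2_keystore` crate's `Keystore::from_json_file` / `decrypt_keypair` API (the actual `load_proxies` implementation may differ):

```rust
use std::path::Path;

use eth2_keystore::Keystore;

/// Illustrative only: open one BLS proxy keystore written by the ERC2335 store.
/// `keys_dir` and `secrets_dir` correspond to `keys_path` and `secrets_path`.
fn open_proxy_keystore(
    keys_dir: &Path,
    secrets_dir: &Path,
    delegator_pubkey: &str,
    module_id: &str,
    proxy_pubkey: &str,
) -> eyre::Result<()> {
    // keys/<DELEGATOR_PUBKEY>/<MODULE_ID>/bls/<PROXY_PUBKEY>.json
    let json_path = keys_dir
        .join(delegator_pubkey)
        .join(module_id)
        .join("bls")
        .join(format!("{proxy_pubkey}.json"));
    // secrets/<DELEGATOR_PUBKEY>/<MODULE_ID>/bls/<PROXY_PUBKEY>
    let pass_path =
        secrets_dir.join(delegator_pubkey).join(module_id).join("bls").join(proxy_pubkey);

    let keystore = Keystore::from_json_file(&json_path)
        .map_err(|e| eyre::eyre!("failed to read keystore: {e:?}"))?;
    let password = std::fs::read_to_string(&pass_path)?;

    // Decrypting yields the proxy secret key; the keystore JSON alone is useless
    // without the password file
    let _keypair = keystore
        .decrypt_keypair(password.trim().as_bytes())
        .map_err(|e| eyre::eyre!("failed to decrypt keystore: {e:?}"))?;

    Ok(())
}
```

The `.sig` file next to the keystore is read separately; it is what the load tests earlier in this diff compare against the delegation signature.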
+ ## Custom module We currently provide a test module that needs to be built locally. To build the module run: ```bash bash scripts/build_local_modules.sh ``` -This will create a Docker image called `test_da_commit` that periodically requests signatures from the validator, and a `test_builder_log` module that logs BuilderAPI events. +This will create a Docker image called `test_da_commit` that periodically requests signatures from the validator, and a `test_builder_log` module that logs BuilderAPI events. The `cb-config.toml` file needs to be updated as follows: ```toml @@ -46,6 +240,7 @@ url = "" [signer] [signer.loader] +format = "lighthouse" keys_path = "/path/to/keys" secrets_path = "/path/to.secrets" @@ -65,7 +260,7 @@ docker_image = "test_builder_log" ``` A few things to note: -- We now added a `signer` section which will be used to create the Signer module. To load keys in the module, we currently support the Lighthouse `validators_dir` keys and secrets. We're working on adding support for additional keystores, including remote signers. +- We now added a `signer` section which will be used to create the Signer module. - There is now a `[[modules]]` section which at a minimum needs to specify the module `id`, `type` and `docker_image`. Additional parameters needed for the business logic of the module will also be here, To learn more about developing modules, check out [here](/category/developing). @@ -80,7 +275,7 @@ You can setup Commit-Boost with Vouch in two ways. For simplicity, assume that in Vouch `blockrelay.listen-address: 127.0.0.0:19550` and in Commit-Boost `pbs.port = 18550`. #### Beacon Node to Vouch -In this setup, the BN Builder-API endpoint will be pointing to the Vouch `blockrelay` (e.g. for Lighthouse you will need the flag `--builder=http://127.0.0.0:19550`). +In this setup, the BN Builder-API endpoint will be pointing to the Vouch `blockrelay` (e.g. for Lighthouse you will need the flag `--builder=http://127.0.0.0:19550`). Modify the `blockrelay.config` file to add Commit-Boost: ```json @@ -90,7 +285,7 @@ Modify the `blockrelay.config` file to add Commit-Boost: ``` #### Beacon Node to Commit Boost -In this setup, the BN Builder-API endpoint will be pointing to the PBS module (e.g. for Lighthouse you will need the flag `--builder=http://127.0.0.0:18550`). +In this setup, the BN Builder-API endpoint will be pointing to the PBS module (e.g. for Lighthouse you will need the flag `--builder=http://127.0.0.0:18550`). This will bypass the `blockrelay` entirely so make sure all relays are properly configured in the `[[relays]]` section. @@ -99,5 +294,3 @@ This will bypass the `blockrelay` entirely so make sure all relays are properly ### Notes - It's up to you to decide which relays will be connected via Commit-Boost (`[[relays]]` section in the `toml` config) and which via Vouch (additional entries in the `relays` field). Remember that any rate-limit will be shared across the two sidecars, if running on the same machine. - You may occasionally see a `timeout` error during registrations, especially if you're running a large number of validators in the same instance. This can resolve itself as registrations will be cleared later in the epoch when relays are less busy processing other registrations. Alternatively you can also adjust the `builderclient.timeout` option in `.vouch.yml`. 
- - diff --git a/docs/docs/get_started/running/binary.md b/docs/docs/get_started/running/binary.md index 0f9339fc..20f9b8ad 100644 --- a/docs/docs/get_started/running/binary.md +++ b/docs/docs/get_started/running/binary.md @@ -22,13 +22,14 @@ Modules need some environment variables to work correctly. ### PBS Module - `CB_BUILDER_URLS`: optional, comma-separated list of urls to `events` modules where to post builder events - +- `CB_PBS_ENDPOINT`: optional, override the endpoint where the PBS module will open the port for the beacon node +- `CB_MUX_PATH_{ID}`: optional, override where to load mux validator keys for mux with id=\{ID\} ### Signer Module - `CB_JWTS`: required, comma-separated list of `MODULE_ID=JWT` to process signature requests - `CB_SIGNER_PORT`: required, port to open the signer server on For loading keys we currently support: - `CB_SIGNER_LOADER_FILE`: path to a `.json` with plaintext keys (for testing purposes only) - - `CB_SIGNER_LOADER_KEYS_DIR` and `CB_SIGNER_LOADER_SECRETS_DIR`: paths to the `keys` and `secrets` directories (ERC-2335 style keystores as used in Lighthouse) + - `CB_SIGNER_LOADER_FORMAT`, `CB_SIGNER_LOADER_KEYS_DIR` and `CB_SIGNER_LOADER_SECRETS_DIR`: paths to the `keys` and `secrets` directories or files (ERC-2335 style keystores, see [Signer config](../configuration/#signer-module) for more info) For storing proxy keys we currently support: - `CB_PROXY_STORE_DIR`: directory where proxy keys and delegations will be saved in plaintext (for testing purposes only) @@ -54,5 +55,4 @@ CB_CONFIG=./cb-config.toml commit-boost-pbs ``` ## Security -Running the modules natively means you opt out of the security guarantees made by Docker and it's up to you how to setup and ensure the modules run safely. - +Running the modules natively means you opt out of the security guarantees made by Docker and it's up to you how to setup and ensure the modules run safely. 
diff --git a/mux_keys.example.json b/mux_keys.example.json new file mode 100644 index 00000000..6f309acd --- /dev/null +++ b/mux_keys.example.json @@ -0,0 +1,5 @@ +[ + "0x8160998addda06f2956e5d1945461f33dbc140486e972b96f341ebf2bdb553a0e3feb127451f5332dd9e33469d37ca67", + "0x87b5dc7f78b68a7b5e7f2e8b9c2115f968332cbf6fc2caaaaa2c9dc219a58206b72c924805f2278c58b55790a2c3bf17", + "0x89e2f50fe5cd07ed2ff0a01340b2f717aa65cced6d89a79fdecc1e924be5f4bbe75c11598bb9a53d307bb39b8223bc52" +] \ No newline at end of file diff --git a/tests/data/keystores/keys/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4/voting-keystore.json b/tests/data/keystores/keys/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4/voting-keystore.json new file mode 100644 index 00000000..72b13cad --- /dev/null +++ b/tests/data/keystores/keys/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4/voting-keystore.json @@ -0,0 +1 @@ +{"crypto":{"kdf":{"function":"pbkdf2","params":{"dklen":32,"c":262144,"prf":"hmac-sha256","salt":"0ded1a0ed9d0d5aa9c41ac1a6be6d9943835f9ccbe1081869af74925611a4687"},"message":""},"checksum":{"function":"sha256","params":{},"message":"b1de458543b0532666e8f24e679f93ed6f168fd09de1da7c3f4f79b7fa2f2412"},"cipher":{"function":"aes-128-ctr","params":{"iv":"3ca34eb318e53a4c7e545571d8d0c7af"},"message":"acc6c222eea80974107b5a9bf824c8156edaad944f0d444a1aab4cc2118cecc5"}},"description":"0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4","pubkey":"883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4","path":"","uuid":"61c06c9c-b0bc-4022-9bf8-a2f250d4e751","version":4} \ No newline at end of file diff --git a/tests/data/keystores/keys/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9/voting-keystore.json b/tests/data/keystores/keys/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9/voting-keystore.json new file mode 100644 index 00000000..ba717c1c --- /dev/null +++ b/tests/data/keystores/keys/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9/voting-keystore.json @@ -0,0 +1 @@ +{"crypto":{"kdf":{"function":"pbkdf2","params":{"dklen":32,"c":262144,"prf":"hmac-sha256","salt":"2154bba4d5999c6069442db5b499b2b27b6c2f54f36490e51163934dd4fb412e"},"message":""},"checksum":{"function":"sha256","params":{},"message":"1db4975098c97905f1dd9a9207cab0a9af7e16bebdab700ee08efb51e068017f"},"cipher":{"function":"aes-128-ctr","params":{"iv":"2265a3b57110b46c08295e53379165b5"},"message":"3bd312cc34cebfdd890c9704752191ed93ecd562bb62d2d8ceb4ff945b58b790"}},"description":"0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9","pubkey":"b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9","path":"","uuid":"a8457299-739d-42fb-a0f6-961020f22b8e","version":4} \ No newline at end of file diff --git a/tests/data/keystores/lodestar-secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4 b/tests/data/keystores/lodestar-secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4 new file mode 100644 index 00000000..88a84e76 --- /dev/null +++ 
b/tests/data/keystores/lodestar-secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4 @@ -0,0 +1 @@ +2MtI__9JSKFcN2Syqpdy5MmM8RXZbM26Pel7G1HCuIg= \ No newline at end of file diff --git a/tests/data/keystores/lodestar-secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9 b/tests/data/keystores/lodestar-secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9 new file mode 100644 index 00000000..b2ce4dfd --- /dev/null +++ b/tests/data/keystores/lodestar-secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9 @@ -0,0 +1 @@ +BWBoV1UZpkO4cUA-t8T9aViJ0sBfilR7qJFHgU4tBSc= \ No newline at end of file diff --git a/tests/data/keystores/prysm/direct/accounts/all-accounts.keystore.json b/tests/data/keystores/prysm/direct/accounts/all-accounts.keystore.json new file mode 100644 index 00000000..40d189bb --- /dev/null +++ b/tests/data/keystores/prysm/direct/accounts/all-accounts.keystore.json @@ -0,0 +1,29 @@ +{ + "crypto": { + "kdf": { + "function": "pbkdf2", + "params": { + "dklen": 32, + "c": 262144, + "prf": "hmac-sha256", + "salt": "0e538586adf998caa12c7a42772cb559ccb49e69c71159d924f0ade3e4a86240" + }, + "message": "" + }, + "checksum": { + "function": "sha256", + "params": {}, + "message": "da07b64a482f95c322b6c506dea20f53007391bc7c60255e480fef5994d6d826" + }, + "cipher": { + "function": "aes-128-ctr", + "params": { + "iv": "7180c42635fb41584db7b9f14264b504" + }, + "message": "11d4016d0893228d09e14d9d354a6d8a5c280eefbb8277c36b281a95dfe9a5c506ae8538f6a25799d1c16c32319bb126ceff4c09a3de5ec355ed8e1c5662e1942e2b32a28977c59ed9a7e3d8756e69b3862dd03f38391ae110f48b0b3520c715633afb7ed62fc6ec9b41b4318e629da6b44ed216b4de02b05b2b0224c083f5ec932980a8d13672562a73bead88b61760753bff91a484dfdc50442686ee054894a61b072c52c934d0763c9502f9988b10f1a50176a2d2a9ba2186d620faa9f97be4762be86da03fa2209c9c7c1974158539a7835b8426225ff6ff173790c55a304282b9a8991ddc5cb9c6e7e7e1cd7ec75e02deeb9b82e0dcfed874fe58fb7bf8a027f9bc127e1d9472afc27ac34575dcb67cc71522ca0c915ba023224a" + } + }, + "path": "", + "uuid": "7d7e3a49-c4ca-4d0a-a0e6-cb199dd72a85", + "version": 4 +} \ No newline at end of file diff --git a/tests/data/keystores/prysm/empty_pass b/tests/data/keystores/prysm/empty_pass new file mode 100644 index 00000000..e69de29b diff --git a/tests/data/keystores/prysm/keymanageropts.json b/tests/data/keystores/prysm/keymanageropts.json new file mode 100644 index 00000000..13c0529c --- /dev/null +++ b/tests/data/keystores/prysm/keymanageropts.json @@ -0,0 +1 @@ +{"direct_eip_version": "EIP-2335"} \ No newline at end of file diff --git a/tests/data/keystores/pubkeys.json b/tests/data/keystores/pubkeys.json new file mode 100644 index 00000000..eca508ca --- /dev/null +++ b/tests/data/keystores/pubkeys.json @@ -0,0 +1 @@ +["0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9","0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4"] \ No newline at end of file diff --git a/tests/data/keystores/secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4 b/tests/data/keystores/secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4 new file mode 100644 index 00000000..88a84e76 --- /dev/null +++ 
b/tests/data/keystores/secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4 @@ -0,0 +1 @@ +2MtI__9JSKFcN2Syqpdy5MmM8RXZbM26Pel7G1HCuIg= \ No newline at end of file diff --git a/tests/data/keystores/secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9 b/tests/data/keystores/secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9 new file mode 100644 index 00000000..b2ce4dfd --- /dev/null +++ b/tests/data/keystores/secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9 @@ -0,0 +1 @@ +BWBoV1UZpkO4cUA-t8T9aViJ0sBfilR7qJFHgU4tBSc= \ No newline at end of file diff --git a/tests/data/keystores/teku-keys/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4.json b/tests/data/keystores/teku-keys/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4.json new file mode 100644 index 00000000..72b13cad --- /dev/null +++ b/tests/data/keystores/teku-keys/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4.json @@ -0,0 +1 @@ +{"crypto":{"kdf":{"function":"pbkdf2","params":{"dklen":32,"c":262144,"prf":"hmac-sha256","salt":"0ded1a0ed9d0d5aa9c41ac1a6be6d9943835f9ccbe1081869af74925611a4687"},"message":""},"checksum":{"function":"sha256","params":{},"message":"b1de458543b0532666e8f24e679f93ed6f168fd09de1da7c3f4f79b7fa2f2412"},"cipher":{"function":"aes-128-ctr","params":{"iv":"3ca34eb318e53a4c7e545571d8d0c7af"},"message":"acc6c222eea80974107b5a9bf824c8156edaad944f0d444a1aab4cc2118cecc5"}},"description":"0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4","pubkey":"883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4","path":"","uuid":"61c06c9c-b0bc-4022-9bf8-a2f250d4e751","version":4} \ No newline at end of file diff --git a/tests/data/keystores/teku-keys/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9.json b/tests/data/keystores/teku-keys/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9.json new file mode 100644 index 00000000..ba717c1c --- /dev/null +++ b/tests/data/keystores/teku-keys/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9.json @@ -0,0 +1 @@ +{"crypto":{"kdf":{"function":"pbkdf2","params":{"dklen":32,"c":262144,"prf":"hmac-sha256","salt":"2154bba4d5999c6069442db5b499b2b27b6c2f54f36490e51163934dd4fb412e"},"message":""},"checksum":{"function":"sha256","params":{},"message":"1db4975098c97905f1dd9a9207cab0a9af7e16bebdab700ee08efb51e068017f"},"cipher":{"function":"aes-128-ctr","params":{"iv":"2265a3b57110b46c08295e53379165b5"},"message":"3bd312cc34cebfdd890c9704752191ed93ecd562bb62d2d8ceb4ff945b58b790"}},"description":"0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9","pubkey":"b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9","path":"","uuid":"a8457299-739d-42fb-a0f6-961020f22b8e","version":4} \ No newline at end of file diff --git a/tests/data/keystores/teku-secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4.txt 
b/tests/data/keystores/teku-secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4.txt new file mode 100644 index 00000000..88a84e76 --- /dev/null +++ b/tests/data/keystores/teku-secrets/0x883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4.txt @@ -0,0 +1 @@ +2MtI__9JSKFcN2Syqpdy5MmM8RXZbM26Pel7G1HCuIg= \ No newline at end of file diff --git a/tests/data/keystores/teku-secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9.txt b/tests/data/keystores/teku-secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9.txt new file mode 100644 index 00000000..b2ce4dfd --- /dev/null +++ b/tests/data/keystores/teku-secrets/0xb3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9.txt @@ -0,0 +1 @@ +BWBoV1UZpkO4cUA-t8T9aViJ0sBfilR7qJFHgU4tBSc= \ No newline at end of file diff --git a/tests/data/proxy/keys/0xac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118/TEST_MODULE/bls/0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba.json b/tests/data/proxy/keys/0xac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118/TEST_MODULE/bls/0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba.json new file mode 100644 index 00000000..e55d22ab --- /dev/null +++ b/tests/data/proxy/keys/0xac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118/TEST_MODULE/bls/0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba.json @@ -0,0 +1 @@ +{"crypto":{"kdf":{"function":"scrypt","params":{"dklen":32,"n":262144,"r":8,"p":1,"salt":"c84961e82805391c0f761cf342c1e6293dab474d388179f4fdea8386310d3920"},"message":""},"checksum":{"function":"sha256","params":{},"message":"4a6ed334d558abeb81ea04893eeed79214eaec476d6225bacccbc7ffbde95843"},"cipher":{"function":"aes-128-ctr","params":{"iv":"bff99639dd8ad6e3339177bad87dcac4"},"message":"e9bca9829d688baa09e65ddecadedd1cb6b49c024a9fff98630817cf835aa9bb"}},"uuid":"38fcc27a-da59-4604-8858-cf3d58d06acc","path":null,"pubkey":"a77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba","version":4,"description":"0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba","name":null} \ No newline at end of file diff --git a/tests/data/proxy/keys/0xac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118/TEST_MODULE/bls/0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba.sig b/tests/data/proxy/keys/0xac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118/TEST_MODULE/bls/0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba.sig new file mode 100644 index 00000000..2ac675c2 --- /dev/null +++ b/tests/data/proxy/keys/0xac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118/TEST_MODULE/bls/0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba.sig @@ -0,0 +1 @@ 
+0xb2e44e777cc68b50b9d19cbded2b2b6a0a5c428e3c341b5ade22f90e67679116511855b94e26ae930d1350628933994713f4fd48d1d70715a99d875a564c88e229aa9bb2d89e9f60b725c97300659bd0fc7bc1e2e599f12625b81ef63890f857 \ No newline at end of file diff --git a/tests/data/proxy/secrets/0xac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118/TEST_MODULE/bls/0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba b/tests/data/proxy/secrets/0xac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118/TEST_MODULE/bls/0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba new file mode 100644 index 00000000..6d8f0bc3 --- /dev/null +++ b/tests/data/proxy/secrets/0xac5e059177afc33263e95d0be0690138b9a1d79a6e19018086a0362e0c30a50bf9e05a08cb44785724d0b2718c5c7118/TEST_MODULE/bls/0xa77084280678d9f1efe4ef47a3d62af27872ce82db19a35ee012c4fd5478e6b1123b8869032ba18b2383e8873294f0ba @@ -0,0 +1 @@ +4ecdc703bdc0b4957876643fbba74f20f5cf7e4435b852fcd9b2d0c2b977a854 \ No newline at end of file diff --git a/tests/src/mock_validator.rs b/tests/src/mock_validator.rs index 44301235..a8f6a8a3 100644 --- a/tests/src/mock_validator.rs +++ b/tests/src/mock_validator.rs @@ -16,8 +16,11 @@ impl MockValidator { Ok(Self { comm_boost: generate_mock_relay(port, BlsPublicKey::default())? }) } - pub async fn do_get_header(&self) -> Result<(), Error> { - let url = self.comm_boost.get_header_url(0, B256::ZERO, BlsPublicKey::ZERO).unwrap(); + pub async fn do_get_header(&self, pubkey: Option) -> Result<(), Error> { + let url = self + .comm_boost + .get_header_url(0, B256::ZERO, pubkey.unwrap_or(BlsPublicKey::ZERO)) + .unwrap(); let res = self.comm_boost.client.get(url).send().await?.bytes().await?; assert!(serde_json::from_slice::(&res).is_ok()); diff --git a/tests/tests/pbs_integration.rs b/tests/tests/pbs_integration.rs index 9fdf2bab..88dea73c 100644 --- a/tests/tests/pbs_integration.rs +++ b/tests/tests/pbs_integration.rs @@ -1,4 +1,5 @@ use std::{ + collections::HashMap, net::{Ipv4Addr, SocketAddr}, sync::Arc, time::Duration, @@ -7,7 +8,7 @@ use std::{ use alloy::primitives::U256; use cb_common::{ - config::{PbsConfig, PbsModuleConfig}, + config::{PbsConfig, PbsModuleConfig, RuntimeMuxConfig}, pbs::RelayClient, signer::{random_secret, BlsPublicKey}, types::Chain, @@ -48,6 +49,7 @@ fn to_pbs_config(chain: Chain, pbs_config: PbsConfig, relays: Vec) signer_client: None, event_publisher: None, relays, + muxes: None, } } @@ -73,7 +75,7 @@ async fn test_get_header() -> Result<()> { let mock_validator = MockValidator::new(port)?; info!("Sending get header"); - let res = mock_validator.do_get_header().await; + let res = mock_validator.do_get_header(None).await; assert!(res.is_ok()); assert_eq!(mock_state.received_get_header(), 1); @@ -197,3 +199,52 @@ async fn test_submit_block_too_large() -> Result<()> { assert_eq!(mock_state.received_submit_block(), 1); Ok(()) } + +#[tokio::test] +async fn test_mux() -> Result<()> { + setup_test_env(); + let signer = random_secret(); + let pubkey_1: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let signer_2 = random_secret(); + let pubkey_2: BlsPublicKey = blst_pubkey_to_alloy(&signer_2.sk_to_pk()).into(); + + let chain = Chain::Holesky; + let port = 3600; + + let mux_relay = generate_mock_relay(port + 1, *pubkey_1)?; + let relays = vec![mux_relay.clone(), generate_mock_relay(port + 2, *pubkey_2)?]; + let mock_state = 
Arc::new(MockRelayState::new(chain, signer)); + tokio::spawn(start_mock_relay_service(mock_state.clone(), port + 1)); + tokio::spawn(start_mock_relay_service(mock_state.clone(), port + 2)); + + let mut config = to_pbs_config(chain, get_pbs_static_config(port), relays); + let mux = RuntimeMuxConfig { + id: String::from("test"), + config: config.pbs_config.clone(), + relays: vec![mux_relay], + }; + + let validator_pubkey = blst_pubkey_to_alloy(&random_secret().sk_to_pk()); + + config.muxes = Some(HashMap::from([(validator_pubkey, mux)])); + + let state = PbsState::new(config); + tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + + // leave some time to start servers + tokio::time::sleep(Duration::from_millis(100)).await; + + let mock_validator = MockValidator::new(port)?; + info!("Sending get header with default"); + let res = mock_validator.do_get_header(None).await; + + assert!(res.is_ok()); + assert_eq!(mock_state.received_get_header(), 2); // both relays were used + + info!("Sending get header with mux"); + let res = mock_validator.do_get_header(Some(validator_pubkey)).await; + + assert!(res.is_ok()); + assert_eq!(mock_state.received_get_header(), 3); // only one relay was used + Ok(()) +}