diff --git a/Cargo.lock b/Cargo.lock index c94eea92..14d3e1da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -410,6 +410,7 @@ dependencies = [ "tower-http", "tracing", "tracing-subscriber", + "url", "zksync_types", "zksync_web3_decl", ] @@ -496,6 +497,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", + "url", "zksync-web3-rs", "zksync_contracts", "zksync_multivm", diff --git a/Cargo.toml b/Cargo.toml index 69789498..6ad6acd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,6 +77,7 @@ tracing-subscriber = { version = "0.3", features = [ "json", "local-time", ] } +url = "2.5.4" ######################### # Test dependencies # diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index ca38223c..c97f2490 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -34,6 +34,7 @@ tracing-subscriber.workspace = true tower.workspace = true tower-http.workspace = true flate2.workspace = true +url.workspace = true [dev-dependencies] tempdir.workspace = true diff --git a/crates/cli/src/cli.rs b/crates/cli/src/cli.rs index 9fb70feb..a5b2e7e6 100644 --- a/crates/cli/src/cli.rs +++ b/crates/cli/src/cli.rs @@ -7,6 +7,7 @@ use anvil_zksync_config::types::{ AccountGenerator, CacheConfig, CacheType, Genesis, SystemContractsOptions, }; use anvil_zksync_config::TestNodeConfig; +use anvil_zksync_core::node::fork::ForkConfig; use anvil_zksync_core::{ node::{InMemoryNode, VersionedState}, utils::write_json_file, @@ -23,6 +24,7 @@ use std::env; use std::io::Read; use std::net::IpAddr; use std::path::PathBuf; +use std::str::FromStr; use std::time::Duration; use std::{ future::Future, @@ -30,6 +32,7 @@ use std::{ task::{Context, Poll}, }; use tokio::time::{Instant, Interval}; +use url::Url; use zksync_types::{H256, U256}; #[derive(Debug, Parser, Clone)] @@ -340,7 +343,7 @@ pub struct ForkArgs { alias = "network", help = "Network to fork from (e.g., http://XXX:YY, mainnet, sepolia-testnet)." 
)] - pub fork_url: String, + pub fork_url: ForkUrl, // Fork at a given L2 miniblock height. // If not set - will use the current finalized block from the network. #[arg( @@ -363,6 +366,48 @@ pub struct ForkArgs { pub fork_transaction_hash: Option, } +#[derive(Clone, Debug)] +pub enum ForkUrl { + Mainnet, + SepoliaTestnet, + Other(Url), +} + +impl ForkUrl { + const MAINNET_URL: &'static str = "https://mainnet.era.zksync.io:443"; + const SEPOLIA_TESTNET_URL: &'static str = "https://sepolia.era.zksync.dev:443"; + + pub fn to_config(&self) -> ForkConfig { + match self { + ForkUrl::Mainnet => ForkConfig { + url: Self::MAINNET_URL.parse().unwrap(), + estimate_gas_price_scale_factor: 1.5, + estimate_gas_scale_factor: 1.4, + }, + ForkUrl::SepoliaTestnet => ForkConfig { + url: Self::SEPOLIA_TESTNET_URL.parse().unwrap(), + estimate_gas_price_scale_factor: 2.0, + estimate_gas_scale_factor: 1.3, + }, + ForkUrl::Other(url) => ForkConfig::unknown(url.clone()), + } + } +} + +impl FromStr for ForkUrl { + type Err = anyhow::Error; + + fn from_str(s: &str) -> std::result::Result { + if s == "mainnet" { + Ok(ForkUrl::Mainnet) + } else if s == "sepolia-testnet" { + Ok(ForkUrl::SepoliaTestnet) + } else { + Ok(Url::from_str(s).map(ForkUrl::Other)?) + } + } +} + #[derive(Debug, Parser, Clone)] pub struct ReplayArgs { /// Whether to fork from existing network. @@ -377,7 +422,7 @@ pub struct ReplayArgs { alias = "network", help = "Network to fork from (e.g., http://XXX:YY, mainnet, sepolia-testnet)." )] - pub fork_url: String, + pub fork_url: ForkUrl, /// Transaction hash to replay. 
#[arg(help = "Transaction hash to replay.")] pub tx: H256, diff --git a/crates/cli/src/main.rs b/crates/cli/src/main.rs index c4480b11..f7c663cc 100644 --- a/crates/cli/src/main.rs +++ b/crates/cli/src/main.rs @@ -1,5 +1,5 @@ use crate::bytecode_override::override_bytecodes; -use crate::cli::{Cli, Command, PeriodicStateDumper}; +use crate::cli::{Cli, Command, ForkUrl, PeriodicStateDumper}; use crate::utils::update_with_fork_details; use anvil_zksync_api_server::NodeServerBuilder; use anvil_zksync_config::constants::{ @@ -10,7 +10,7 @@ use anvil_zksync_config::constants::{ use anvil_zksync_config::types::SystemContractsOptions; use anvil_zksync_config::ForkPrintInfo; use anvil_zksync_core::filters::EthFilters; -use anvil_zksync_core::node::fork::ForkDetails; +use anvil_zksync_core::node::fork::ForkClient; use anvil_zksync_core::node::{ BlockSealer, BlockSealerMode, ImpersonationManager, InMemoryNode, InMemoryNodeInner, NodeExecutor, StorageKeyLayout, TestNodeFeeInputProvider, TxPool, @@ -27,8 +27,7 @@ use tokio::sync::RwLock; use tower_http::cors::AllowOrigin; use tracing_subscriber::filter::LevelFilter; use zksync_types::fee_model::{FeeModelConfigV2, FeeParams}; -use zksync_types::H160; -use zksync_web3_decl::namespaces::ZksNamespaceClient; +use zksync_types::{L2BlockNumber, H160}; mod bytecode_override; mod cli; @@ -57,7 +56,7 @@ async fn main() -> anyhow::Result<()> { // Use `Command::Run` as default. 
let command = command.as_ref().unwrap_or(&Command::Run); - let fork_details = match command { + let (fork_client, transactions_to_replay) = match command { Command::Run => { if config.offline { tracing::warn!("Running in offline mode: default fee parameters will be used."); @@ -79,16 +78,12 @@ async fn main() -> anyhow::Result<()> { config.l1_pubdata_price.or(Some(DEFAULT_FAIR_PUBDATA_PRICE)), ) .with_chain_id(config.chain_id.or(Some(TEST_NODE_NETWORK_ID))); - None + (None, Vec::new()) } else { // Initialize the client to get the fee params - let (_, client) = ForkDetails::fork_network_and_client("mainnet") - .map_err(|e| anyhow!("Failed to initialize client: {:?}", e))?; - - let fee = client.get_fee_params().await.map_err(|e| { - tracing::error!("Failed to fetch fee params: {:?}", e); - anyhow!(e) - })?; + let client = + ForkClient::at_block_number(ForkUrl::Mainnet.to_config(), None).await?; + let fee = client.get_fee_params().await?; match fee { FeeParams::V2(fee_v2) => { @@ -120,56 +115,37 @@ async fn main() -> anyhow::Result<()> { } } - None + (None, Vec::new()) } } Command::Fork(fork) => { - let fork_details_result = if let Some(tx_hash) = fork.fork_transaction_hash { - // If fork_transaction_hash is provided, use from_network_tx - ForkDetails::from_network_tx(&fork.fork_url, tx_hash, &config.cache_config).await + // TODO: For now, we do not replay earlier transactions when forking to keep compatibility + // with the legacy forking behavior. + let (fork_client, _) = if let Some(tx_hash) = fork.fork_transaction_hash { + // If transaction hash is provided, we fork at the parent of block containing tx + ForkClient::at_before_tx(fork.fork_url.to_config(), tx_hash).await? 
} else { - // Otherwise, use from_network - ForkDetails::from_network( - &fork.fork_url, - fork.fork_block_number, - &config.cache_config, + // Otherwise, we fork at the provided block + ( + ForkClient::at_block_number( + fork.fork_url.to_config(), + fork.fork_block_number.map(|bn| L2BlockNumber(bn as u32)), + ) + .await?, + Vec::new(), ) - .await }; - update_with_fork_details(&mut config, fork_details_result).await? + update_with_fork_details(&mut config, &fork_client.details).await; + (Some(fork_client), Vec::new()) } Command::ReplayTx(replay_tx) => { - let fork_details_result = ForkDetails::from_network_tx( - &replay_tx.fork_url, - replay_tx.tx, - &config.cache_config, - ) - .await; - - update_with_fork_details(&mut config, fork_details_result).await? - } - }; + let (fork_client, earlier_txs) = + ForkClient::at_before_tx(replay_tx.fork_url.to_config(), replay_tx.tx).await?; - // If we're replaying the transaction, we need to sync to the previous block - // and then replay all the transactions that happened in - let transactions_to_replay = if let Command::ReplayTx(replay_tx) = command { - match fork_details - .as_ref() - .unwrap() - .get_earlier_transactions_in_same_block(replay_tx.tx) - { - Ok(txs) => txs, - Err(error) => { - tracing::error!( - "failed to get earlier transactions in the same block for replay tx: {:?}", - error - ); - return Err(anyhow!(error)); - } + update_with_fork_details(&mut config, &fork_client.details).await; + (Some(fork_client), earlier_txs) } - } else { - vec![] }; if matches!( @@ -181,28 +157,31 @@ async fn main() -> anyhow::Result<()> { } } - let fork_print_info = if let Some(fd) = fork_details.as_ref() { - let fee_model_config_v2 = match fd.fee_params { - Some(FeeParams::V2(fee_params_v2)) => { + let fork_print_info = if let Some(fork_client) = &fork_client { + let fee_model_config_v2 = match &fork_client.details.fee_params { + FeeParams::V2(fee_params_v2) => { let config = fee_params_v2.config(); - Some(FeeModelConfigV2 { + 
FeeModelConfigV2 { minimal_l2_gas_price: config.minimal_l2_gas_price, compute_overhead_part: config.compute_overhead_part, pubdata_overhead_part: config.pubdata_overhead_part, batch_overhead_l1_gas: config.batch_overhead_l1_gas, max_gas_per_batch: config.max_gas_per_batch, max_pubdata_per_batch: config.max_pubdata_per_batch, - }) + } } - _ => None, + _ => anyhow::bail!( + "fork is using unsupported fee parameters: {:?}", + fork_client.details.fee_params + ), }; Some(ForkPrintInfo { - network_rpc: fd.fork_source.get_fork_url().unwrap_or_default(), - l1_block: fd.l1_block.to_string(), - l2_block: fd.l2_miniblock.to_string(), - block_timestamp: fd.block_timestamp.to_string(), - fork_block_hash: format!("{:#x}", fd.l2_block.hash), + network_rpc: fork_client.url.to_string(), + l1_block: fork_client.details.batch_number.to_string(), + l2_block: fork_client.details.block_number.to_string(), + block_timestamp: fork_client.details.block_timestamp.to_string(), + fork_block_hash: format!("{:#x}", fork_client.details.block_hash), fee_model_config_v2, }) } else { @@ -216,7 +195,8 @@ async fn main() -> anyhow::Result<()> { } let pool = TxPool::new(impersonation.clone(), config.transaction_order); - let fee_input_provider = TestNodeFeeInputProvider::from_fork(fork_details.as_ref()); + let fee_input_provider = + TestNodeFeeInputProvider::from_fork(fork_client.as_ref().map(|f| &f.details)); let filters = Arc::new(RwLock::new(EthFilters::default())); let system_contracts = SystemContracts::from_options( &config.system_contracts_options, @@ -229,8 +209,8 @@ async fn main() -> anyhow::Result<()> { StorageKeyLayout::ZkEra }; - let (node_inner, storage, blockchain, time) = InMemoryNodeInner::init( - fork_details, + let (node_inner, storage, blockchain, time, fork) = InMemoryNodeInner::init( + fork_client, fee_input_provider.clone(), filters, config.clone(), @@ -258,6 +238,7 @@ async fn main() -> anyhow::Result<()> { node_inner, blockchain, storage, + fork, node_handle, 
Some(observability), time, diff --git a/crates/cli/src/utils.rs b/crates/cli/src/utils.rs index fae302e8..d2df6e3f 100644 --- a/crates/cli/src/utils.rs +++ b/crates/cli/src/utils.rs @@ -11,36 +11,23 @@ pub fn parse_genesis_file(path: &str) -> Result { } /// Updates the configuration from fork details. -pub async fn update_with_fork_details( - config: &mut TestNodeConfig, - fork_details_result: Result, -) -> Result, anyhow::Error> { - match fork_details_result { - Ok(fd) => { - let l1_gas_price = config.l1_gas_price.or(Some(fd.l1_gas_price)); - let l2_gas_price = config.l2_gas_price.or(Some(fd.l2_fair_gas_price)); - let l1_pubdata_price = config.l1_pubdata_price.or(Some(fd.fair_pubdata_price)); - let price_scale = config - .price_scale_factor - .or(Some(fd.estimate_gas_price_scale_factor)); - let gas_limit_scale = config - .limit_scale_factor - .or(Some(fd.estimate_gas_scale_factor)); - let chain_id = config.chain_id.or(Some(fd.chain_id.as_u64() as u32)); +pub async fn update_with_fork_details(config: &mut TestNodeConfig, fd: &ForkDetails) { + let l1_gas_price = config.l1_gas_price.or(Some(fd.l1_gas_price)); + let l2_gas_price = config.l2_gas_price.or(Some(fd.l2_fair_gas_price)); + let l1_pubdata_price = config.l1_pubdata_price.or(Some(fd.fair_pubdata_price)); + let price_scale = config + .price_scale_factor + .or(Some(fd.estimate_gas_price_scale_factor)); + let gas_limit_scale = config + .limit_scale_factor + .or(Some(fd.estimate_gas_scale_factor)); + let chain_id = config.chain_id.or(Some(fd.chain_id.as_u64() as u32)); - config - .update_l1_gas_price(l1_gas_price) - .update_l2_gas_price(l2_gas_price) - .update_l1_pubdata_price(l1_pubdata_price) - .update_price_scale(price_scale) - .update_gas_limit_scale(gas_limit_scale) - .update_chain_id(chain_id); - - Ok(Some(fd)) - } - Err(error) => { - tracing::error!("Error while attempting to fork: {:?}", error); - Err(anyhow::anyhow!(error)) - } - } + config + .update_l1_gas_price(l1_gas_price) + 
.update_l2_gas_price(l2_gas_price) + .update_l1_pubdata_price(l1_pubdata_price) + .update_price_scale(price_scale) + .update_gas_limit_scale(gas_limit_scale) + .update_chain_id(chain_id); } diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index e7ceafdb..ad47aa6a 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -32,7 +32,7 @@ pub struct ForkPrintInfo { pub l2_block: String, pub block_timestamp: String, pub fork_block_hash: String, - pub fee_model_config_v2: Option, + pub fee_model_config_v2: FeeModelConfigV2, } /// Defines the configuration parameters for the [InMemoryNode]. @@ -282,28 +282,38 @@ impl TestNodeConfig { "Fork Block Hash: {}", format!("{:#}", fd.fork_block_hash).green() ); - if let Some(fee_config) = &fd.fee_model_config_v2 { - tracing::info!( - "Compute Overhead Part: {}", - fee_config.compute_overhead_part.to_string().green() - ); - tracing::info!( - "Pubdata Overhead Part: {}", - fee_config.pubdata_overhead_part.to_string().green() - ); - tracing::info!( - "Batch Overhead L1 Gas: {}", - fee_config.batch_overhead_l1_gas.to_string().green() - ); - tracing::info!( - "Max Gas Per Batch: {}", - fee_config.max_gas_per_batch.to_string().green() - ); - tracing::info!( - "Max Pubdata Per Batch: {}", - fee_config.max_pubdata_per_batch.to_string().green() - ); - } + tracing::info!( + "Compute Overhead Part: {}", + fd.fee_model_config_v2 + .compute_overhead_part + .to_string() + .green() + ); + tracing::info!( + "Pubdata Overhead Part: {}", + fd.fee_model_config_v2 + .pubdata_overhead_part + .to_string() + .green() + ); + tracing::info!( + "Batch Overhead L1 Gas: {}", + fd.fee_model_config_v2 + .batch_overhead_l1_gas + .to_string() + .green() + ); + tracing::info!( + "Max Gas Per Batch: {}", + fd.fee_model_config_v2.max_gas_per_batch.to_string().green() + ); + tracing::info!( + "Max Pubdata Per Batch: {}", + fd.fee_model_config_v2 + .max_pubdata_per_batch + .to_string() + .green() + ); println!("\n"); } else { 
tracing::info!("Network Configuration"); diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index 75bbb313..778177b4 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -41,6 +41,7 @@ time.workspace = true flate2.workspace = true thiserror.workspace = true async-trait.workspace = true +url.workspace = true [dev-dependencies] maplit.workspace = true diff --git a/crates/core/src/http_fork_source.rs b/crates/core/src/http_fork_source.rs deleted file mode 100644 index 45b8f7e6..00000000 --- a/crates/core/src/http_fork_source.rs +++ /dev/null @@ -1,778 +0,0 @@ -use std::{ - str::FromStr, - sync::{Arc, RwLock}, -}; - -use crate::cache::Cache; -use crate::node::fork::ForkSource; -use crate::utils::block_on; -use anvil_zksync_config::types::CacheConfig; -use eyre::Context; -use zksync_types::{ - api::{BridgeAddresses, Transaction}, - url::SensitiveUrl, -}; -use zksync_types::{H256, U256}; -use zksync_web3_decl::{ - client::Client, - namespaces::{EthNamespaceClient, ZksNamespaceClient}, - types::Index, -}; -use zksync_web3_decl::{client::L2, types::Token}; - -#[derive(Debug, Clone)] -/// Fork source that gets the data via HTTP requests. -pub struct HttpForkSource { - /// URL for the network to fork. - pub fork_url: String, - /// Cache for network data. 
- pub(crate) cache: Arc>, -} - -impl HttpForkSource { - pub fn new(fork_url: String, cache_config: CacheConfig) -> Self { - Self { - fork_url, - cache: Arc::new(RwLock::new(Cache::new(cache_config))), - } - } - - pub fn create_client(&self) -> Client { - let url = SensitiveUrl::from_str(&self.fork_url) - .unwrap_or_else(|_| panic!("Unable to parse client URL: {}", &self.fork_url)); - Client::http(url) - .unwrap_or_else(|_| panic!("Unable to create a client for fork: {}", self.fork_url)) - .build() - } -} - -impl ForkSource for HttpForkSource { - fn get_fork_url(&self) -> eyre::Result { - Ok(self.fork_url.clone()) - } - - fn get_storage_at( - &self, - address: zksync_types::Address, - idx: zksync_types::U256, - block: Option, - ) -> eyre::Result { - let client = self.create_client(); - block_on(async move { client.get_storage_at(address, idx, block).await }) - .wrap_err("fork http client failed") - } - - fn get_bytecode_by_hash(&self, hash: zksync_types::H256) -> eyre::Result>> { - let client = self.create_client(); - block_on(async move { client.get_bytecode_by_hash(hash).await }) - .wrap_err("fork http client failed") - } - - fn get_transaction_by_hash( - &self, - hash: zksync_types::H256, - ) -> eyre::Result> { - if let Ok(Some(transaction)) = self - .cache - .read() - .map(|guard| guard.get_transaction(&hash).cloned()) - { - tracing::debug!("using cached transaction for {hash}"); - return Ok(Some(transaction)); - } - - let client = self.create_client(); - block_on(async move { client.get_transaction_by_hash(hash).await }) - .inspect(|maybe_transaction| { - if let Some(transaction) = &maybe_transaction { - self.cache - .write() - .map(|mut guard| guard.insert_transaction(hash, transaction.clone())) - .unwrap_or_else(|err| { - tracing::warn!( - "failed writing to cache for 'get_transaction_by_hash': {:?}", - err - ) - }); - } - }) - .wrap_err("fork http client failed") - } - - fn get_transaction_details( - &self, - hash: H256, - ) -> eyre::Result> { - let client = 
self.create_client(); - // n.b- We don't cache these responses as they will change through the lifecycle of the transaction - // and caching could be error-prone. in theory we could cache responses once the txn status - // is `final` or `failed` but currently this does not warrant the additional complexity. - block_on(async move { client.get_transaction_details(hash).await }) - .wrap_err("fork http client failed") - } - - fn get_raw_block_transactions( - &self, - block_number: zksync_types::L2BlockNumber, - ) -> eyre::Result> { - let number = block_number.0 as u64; - if let Ok(Some(transaction)) = self - .cache - .read() - .map(|guard| guard.get_block_raw_transactions(&number).cloned()) - { - tracing::debug!("using cached raw transactions for block {block_number}"); - return Ok(transaction); - } - - let client = self.create_client(); - block_on(async move { client.get_raw_block_transactions(block_number).await }) - .wrap_err("fork http client failed") - .inspect(|transactions| { - if !transactions.is_empty() { - self.cache - .write() - .map(|mut guard| { - guard.insert_block_raw_transactions(number, transactions.clone()) - }) - .unwrap_or_else(|err| { - tracing::warn!( - "failed writing to cache for 'get_raw_block_transactions': {:?}", - err - ) - }); - } - }) - } - - fn get_block_by_hash( - &self, - hash: zksync_types::H256, - full_transactions: bool, - ) -> eyre::Result>> { - if let Ok(Some(block)) = self - .cache - .read() - .map(|guard| guard.get_block(&hash, full_transactions).cloned()) - { - tracing::debug!("using cached block for {hash}"); - return Ok(Some(block)); - } - - let client = self.create_client(); - block_on(async move { client.get_block_by_hash(hash, full_transactions).await }) - .inspect(|block| { - if let Some(block) = &block { - self.cache - .write() - .map(|mut guard| guard.insert_block(hash, full_transactions, block.clone())) - .unwrap_or_else(|err| { - tracing::warn!( - "failed writing to cache for 'get_block_by_hash': {:?}", - err - ) - }); 
- } - }) - .wrap_err("fork http client failed") - } - - fn get_block_by_number( - &self, - block_number: zksync_types::api::BlockNumber, - full_transactions: bool, - ) -> eyre::Result>> { - let maybe_number = match block_number { - zksync_types::api::BlockNumber::Number(block_number) => Some(block_number), - _ => None, - }; - - if let Some(block) = maybe_number.and_then(|number| { - self.cache.read().ok().and_then(|guard| { - guard - .get_block_hash(&number.as_u64()) - .and_then(|hash| guard.get_block(hash, full_transactions).cloned()) - }) - }) { - tracing::debug!("using cached block for {block_number}"); - return Ok(Some(block)); - } - - let client = self.create_client(); - block_on(async move { - client - .get_block_by_number(block_number, full_transactions) - .await - }) - .inspect(|block| { - if let Some(block) = &block { - self.cache - .write() - .map(|mut guard| { - guard.insert_block(block.hash, full_transactions, block.clone()) - }) - .unwrap_or_else(|err| { - tracing::warn!( - "failed writing to cache for 'get_block_by_number': {:?}", - err - ) - }); - } - }) - .wrap_err("fork http client failed") - } - - /// Returns the transaction count for a given block hash. - fn get_block_transaction_count_by_hash(&self, block_hash: H256) -> eyre::Result> { - let client = self.create_client(); - block_on(async move { client.get_block_transaction_count_by_hash(block_hash).await }) - .wrap_err("fork http client failed") - } - - /// Returns the transaction count for a given block number. - fn get_block_transaction_count_by_number( - &self, - block_number: zksync_types::api::BlockNumber, - ) -> eyre::Result> { - let client = self.create_client(); - block_on(async move { - client - .get_block_transaction_count_by_number(block_number) - .await - }) - .wrap_err("fork http client failed") - } - - /// Returns information about a transaction by block hash and transaction index position. 
- fn get_transaction_by_block_hash_and_index( - &self, - block_hash: H256, - index: Index, - ) -> eyre::Result> { - let client = self.create_client(); - block_on(async move { - client - .get_transaction_by_block_hash_and_index(block_hash, index) - .await - }) - .wrap_err("fork http client failed") - } - - /// Returns information about a transaction by block number and transaction index position. - fn get_transaction_by_block_number_and_index( - &self, - block_number: zksync_types::api::BlockNumber, - index: Index, - ) -> eyre::Result> { - let client = self.create_client(); - block_on(async move { - client - .get_transaction_by_block_number_and_index(block_number, index) - .await - }) - .wrap_err("fork http client failed") - } - - /// Returns details of a block, given miniblock number - fn get_block_details( - &self, - miniblock: zksync_types::L2BlockNumber, - ) -> eyre::Result> { - let client = self.create_client(); - block_on(async move { client.get_block_details(miniblock).await }).wrap_err(format!( - "Failed to get block details for {} l2 block in fork http client", - miniblock - )) - } - - /// Returns fee parameters for the give source. - fn get_fee_params(&self) -> eyre::Result { - let client = self.create_client(); - block_on(async move { client.get_fee_params().await }).wrap_err("fork http client failed") - } - - /// Returns addresses of the default bridge contracts. 
- fn get_bridge_contracts(&self) -> eyre::Result { - if let Some(bridge_addresses) = self - .cache - .read() - .ok() - .and_then(|guard| guard.get_bridge_addresses().cloned()) - { - tracing::debug!("using cached bridge contracts"); - return Ok(bridge_addresses); - }; - - let client = self.create_client(); - block_on(async move { client.get_bridge_contracts().await }) - .inspect(|bridge_addresses| { - self.cache - .write() - .map(|mut guard| guard.set_bridge_addresses(bridge_addresses.clone())) - .unwrap_or_else(|err| { - tracing::warn!( - "failed writing to cache for 'get_bridge_contracts': {:?}", - err - ) - }); - }) - .wrap_err("fork http client failed") - } - - /// Returns known token addresses - fn get_confirmed_tokens(&self, from: u32, limit: u8) -> eyre::Result> { - if let Some(confirmed_tokens) = self - .cache - .read() - .ok() - .and_then(|guard| guard.get_confirmed_tokens(from, limit).cloned()) - { - tracing::debug!("using cached confirmed_tokens"); - return Ok(confirmed_tokens); - }; - - let client = self.create_client(); - block_on(async move { client.get_confirmed_tokens(from, limit).await }) - .inspect(|confirmed_tokens| { - self.cache - .write() - .map(|mut guard| { - guard.set_confirmed_tokens(from, limit, confirmed_tokens.clone()) - }) - .unwrap_or_else(|err| { - tracing::warn!( - "failed writing to cache for 'set_confirmed_tokens': {:?}", - err - ) - }); - }) - .wrap_err("fork http client failed") - } -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use zksync_types::api::BlockNumber; - use zksync_types::{Address, L2BlockNumber, H160, H256, U64}; - - use crate::testing; - - use super::*; - - #[test] - fn test_get_block_by_hash_full_is_cached() { - let input_block_hash = H256::repeat_byte(0x01); - let input_block_number = 8; - - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockByHash", - "params": [ - format!("{input_block_hash:#x}"), - true 
- ], - }), - testing::BlockResponseBuilder::new() - .set_hash(input_block_hash) - .set_number(input_block_number) - .build(), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - - let actual_block = fork_source - .get_block_by_hash(input_block_hash, true) - .expect("failed fetching block by hash") - .expect("no block"); - - assert_eq!(input_block_hash, actual_block.hash); - assert_eq!(U64::from(input_block_number), actual_block.number); - - let actual_block = fork_source - .get_block_by_hash(input_block_hash, true) - .expect("failed fetching cached block by hash") - .expect("no block"); - - assert_eq!(input_block_hash, actual_block.hash); - assert_eq!(U64::from(input_block_number), actual_block.number); - } - - #[test] - fn test_get_block_by_hash_minimal_is_cached() { - let input_block_hash = H256::repeat_byte(0x01); - let input_block_number = 8; - - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockByHash", - "params": [ - format!("{input_block_hash:#x}"), - false - ], - }), - testing::BlockResponseBuilder::new() - .set_hash(input_block_hash) - .set_number(input_block_number) - .build(), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - - let actual_block = fork_source - .get_block_by_hash(input_block_hash, false) - .expect("failed fetching block by hash") - .expect("no block"); - - assert_eq!(input_block_hash, actual_block.hash); - assert_eq!(U64::from(input_block_number), actual_block.number); - - let actual_block = fork_source - .get_block_by_hash(input_block_hash, false) - .expect("failed fetching cached block by hash") - .expect("no block"); - - assert_eq!(input_block_hash, actual_block.hash); - assert_eq!(U64::from(input_block_number), actual_block.number); - } - - #[test] - fn test_get_block_by_number_full_is_cached() { - let input_block_hash = H256::repeat_byte(0x01); - let 
input_block_number = 8; - - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockByNumber", - "params": [ - format!("{input_block_number:#x}"), - true - ], - }), - testing::BlockResponseBuilder::new() - .set_hash(input_block_hash) - .set_number(input_block_number) - .build(), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - - let actual_block = fork_source - .get_block_by_number( - zksync_types::api::BlockNumber::Number(U64::from(input_block_number)), - true, - ) - .expect("failed fetching block by number") - .expect("no block"); - - assert_eq!(input_block_hash, actual_block.hash); - assert_eq!(U64::from(input_block_number), actual_block.number); - - let actual_block = fork_source - .get_block_by_number( - zksync_types::api::BlockNumber::Number(U64::from(input_block_number)), - true, - ) - .expect("failed fetching cached block by number") - .expect("no block"); - - assert_eq!(input_block_hash, actual_block.hash); - assert_eq!(U64::from(input_block_number), actual_block.number); - } - - #[test] - fn test_get_block_by_number_minimal_is_cached() { - let input_block_hash = H256::repeat_byte(0x01); - let input_block_number = 8; - - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockByNumber", - "params": [ - format!("{input_block_number:#x}"), - false - ], - }), - testing::BlockResponseBuilder::new() - .set_hash(input_block_hash) - .set_number(input_block_number) - .build(), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - - let actual_block = fork_source - .get_block_by_number(BlockNumber::Number(U64::from(input_block_number)), false) - .expect("failed fetching block by number") - .expect("no block"); - - assert_eq!(input_block_hash, actual_block.hash); - assert_eq!(U64::from(input_block_number), 
actual_block.number); - - let actual_block = fork_source - .get_block_by_number(BlockNumber::Number(U64::from(input_block_number)), false) - .expect("failed fetching cached block by number") - .expect("no block"); - - assert_eq!(input_block_hash, actual_block.hash); - assert_eq!(U64::from(input_block_number), actual_block.number); - } - - #[test] - fn test_get_raw_block_transactions_is_cached() { - let input_block_number = 8u32; - - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getRawBlockTransactions", - "params": [ - input_block_number, - ], - }), - testing::RawTransactionsResponseBuilder::new() - .add(1) - .build(), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - - let actual_raw_transactions = fork_source - .get_raw_block_transactions(L2BlockNumber(input_block_number)) - .expect("failed fetching block raw transactions"); - assert_eq!(1, actual_raw_transactions.len()); - - let actual_raw_transactions = fork_source - .get_raw_block_transactions(L2BlockNumber(input_block_number)) - .expect("failed fetching cached block raw transactions"); - assert_eq!(1, actual_raw_transactions.len()); - } - - #[test] - fn test_get_transactions_is_cached() { - let input_tx_hash = H256::repeat_byte(0x01); - - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getTransactionByHash", - "params": [ - input_tx_hash, - ], - }), - testing::TransactionResponseBuilder::new() - .set_hash(input_tx_hash) - .build(), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - - let actual_transaction = fork_source - .get_transaction_by_hash(input_tx_hash) - .expect("failed fetching transaction") - .expect("no transaction"); - assert_eq!(input_tx_hash, actual_transaction.hash); - - let actual_transaction = fork_source - 
.get_transaction_by_hash(input_tx_hash) - .expect("failed fetching cached transaction") - .expect("no transaction"); - assert_eq!(input_tx_hash, actual_transaction.hash); - } - - #[test] - fn test_get_transaction_details() { - let input_tx_hash = H256::repeat_byte(0x01); - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getTransactionDetails", - "params": [ - input_tx_hash, - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": { - "isL1Originated": false, - "status": "included", - "fee": "0x74293f087500", - "gasPerPubdata": "0x4e20", - "initiatorAddress": "0x63ab285cd87a189f345fed7dd4e33780393e01f0", - "receivedAt": "2023-10-12T15:45:53.094Z", - "ethCommitTxHash": null, - "ethProveTxHash": null, - "ethExecuteTxHash": null - }, - "id": 0 - }), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - let transaction_details = fork_source - .get_transaction_details(input_tx_hash) - .expect("failed fetching transaction") - .expect("no transaction"); - assert_eq!( - transaction_details.initiator_address, - Address::from_str("0x63ab285cd87a189f345fed7dd4e33780393e01f0").unwrap() - ); - } - - #[test] - fn test_get_block_details() { - let miniblock = L2BlockNumber::from(16474138); - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getBlockDetails", - "params": [ - miniblock.0, - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": { - "number": 16474138, - "l1BatchNumber": 270435, - "timestamp": 1697405098, - "l1TxCount": 0, - "l2TxCount": 1, - "rootHash": "0xd9e60f9a684fd7fc16e87ae923341a6e4af24f286e76612efdfc2d55f3f4d064", - "status": "sealed", - "commitTxHash": null, - "committedAt": null, - "proveTxHash": null, - "provenAt": null, - "executeTxHash": null, - "executedAt": null, - "l1GasPrice": 6156252068u64, - "l2FairGasPrice": 50000000u64, 
- "fairPubdataPrice": 100u64, - "baseSystemContractsHashes": { - "bootloader": "0x0100089b8a2f2e6a20ba28f02c9e0ed0c13d702932364561a0ea61621f65f0a8", - "default_aa": "0x0100067d16a5485875b4249040bf421f53e869337fe118ec747cf40a4c777e5f" - }, - "operatorAddress": "0xa9232040bf0e0aea2578a5b2243f2916dbfc0a69", - "protocolVersion": "Version15", - }, - "id": 0 - }), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - let block_details = fork_source - .get_block_details(miniblock) - .expect("failed fetching transaction") - .expect("no transaction"); - assert_eq!( - block_details.operator_address, - Address::from_str("0xa9232040bf0e0aea2578a5b2243f2916dbfc0a69").unwrap() - ); - } - - #[test] - fn test_get_bridge_contracts_is_cached() { - let input_bridge_addresses = BridgeAddresses { - l1_erc20_default_bridge: Some(H160::repeat_byte(0x1)), - l2_erc20_default_bridge: Some(H160::repeat_byte(0x2)), - l1_shared_default_bridge: Some(H160::repeat_byte(0x5)), - l2_shared_default_bridge: Some(H160::repeat_byte(0x2)), - l1_weth_bridge: Some(H160::repeat_byte(0x3)), - l2_weth_bridge: Some(H160::repeat_byte(0x4)), - l2_legacy_shared_bridge: Some(H160::repeat_byte(0x6)), - }; - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getBridgeContracts", - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": { - "l1Erc20SharedBridge": format!("{:#x}", input_bridge_addresses.l1_shared_default_bridge.unwrap()), - "l2Erc20SharedBridge": format!("{:#x}", input_bridge_addresses.l2_shared_default_bridge.unwrap()), - "l1Erc20DefaultBridge": format!("{:#x}", input_bridge_addresses.l1_erc20_default_bridge.unwrap()), - "l2Erc20DefaultBridge": format!("{:#x}", input_bridge_addresses.l2_erc20_default_bridge.unwrap()), - "l1WethBridge": format!("{:#x}", input_bridge_addresses.l1_weth_bridge.unwrap()), - "l2WethBridge": format!("{:#x}", 
input_bridge_addresses.l2_weth_bridge.unwrap()) - }, - "id": 0 - }), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - - let actual_bridge_addresses = fork_source - .get_bridge_contracts() - .expect("failed fetching bridge addresses"); - testing::assert_bridge_addresses_eq(&input_bridge_addresses, &actual_bridge_addresses); - - let actual_bridge_addresses = fork_source - .get_bridge_contracts() - .expect("failed fetching bridge addresses"); - testing::assert_bridge_addresses_eq(&input_bridge_addresses, &actual_bridge_addresses); - } - - #[test] - fn test_get_confirmed_tokens_is_cached() { - let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getConfirmedTokens", - "params": [0, 100] - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": [ - { - "decimals": 18, - "l1Address": "0xbe9895146f7af43049ca1c1ae358b0541ea49704", - "l2Address": "0x75af292c1c9a37b3ea2e6041168b4e48875b9ed5", - "name": "Coinbase Wrapped Staked ETH", - "symbol": "cbETH" - } - ], - "id": 0 - }), - ); - - let fork_source = HttpForkSource::new(mock_server.url(), CacheConfig::Memory); - - let tokens = fork_source - .get_confirmed_tokens(0, 100) - .expect("failed fetching tokens"); - assert_eq!(tokens.len(), 1); - assert_eq!(tokens[0].symbol, "cbETH"); - - let tokens = fork_source - .get_confirmed_tokens(0, 100) - .expect("failed fetching tokens"); - assert_eq!(tokens.len(), 1); - } -} diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 674fdf15..70f6a911 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -46,7 +46,6 @@ pub mod console_log; pub mod deps; pub mod filters; pub mod formatter; -pub mod http_fork_source; pub mod node; pub mod observability; pub mod resolver; diff --git a/crates/core/src/node/eth.rs b/crates/core/src/node/eth.rs index cdfabbf6..11585abb 100644 --- a/crates/core/src/node/eth.rs +++ b/crates/core/src/node/eth.rs @@ 
-189,21 +189,7 @@ impl InMemoryNode { if let Some(block) = self.blockchain.get_block_by_id(block_id).await { Some(block) } else { - self.inner - .read() - .await - .fork_storage - .inner - .read() - .expect("failed reading fork storage") - .fork - .as_ref() - .and_then(|fork| { - fork.fork_source - .get_block_by_id(block_id, true) - .ok() - .flatten() - }) + self.fork.get_block_by_id(block_id).await? } }; @@ -284,22 +270,7 @@ impl InMemoryNode { // try retrieving transaction from memory, and if unavailable subsequently from the fork match self.blockchain.get_tx_api(&hash).await? { Some(tx) => Ok(Some(tx)), - None => Ok(self - .inner - .read() - .await - .fork_storage - .inner - .read() - .expect("failed reading fork storage") - .fork - .as_ref() - .and_then(|fork| { - fork.fork_source - .get_transaction_by_hash(hash) - .ok() - .flatten() - })), + None => self.fork.get_transaction_by_hash(hash).await, } } @@ -435,30 +406,18 @@ impl InMemoryNode { &self, block_id: api::BlockId, ) -> Result, Web3Error> { - let result = self.blockchain.get_block_tx_count_by_id(block_id).await; - let result = match result { - Some(result) => Some(U256::from(result)), - None => self - .inner - .read() - .await - .fork_storage - .inner - .read() - .expect("failed reading fork storage") - .fork - .as_ref() - .and_then(|fork| { - fork.fork_source - .get_block_transaction_count_by_id(block_id) - .ok() - .flatten() - }), + let count = match self.blockchain.get_block_tx_count_by_id(block_id).await { + Some(count) => Some(U256::from(count)), + None => { + self.fork + .get_block_transaction_count_by_id(block_id) + .await? + } }; // TODO: Is this right? What is the purpose of having `Option` here then? 
- match result { - Some(value) => Ok(Some(value)), + match count { + Some(count) => Ok(Some(count)), None => Err(Web3Error::NoBlock), } } @@ -481,31 +440,18 @@ impl InMemoryNode { block_id: api::BlockId, index: web3::Index, ) -> anyhow::Result> { - let tx = self + match self .blockchain .get_block_tx_by_id(block_id, index.as_usize()) - .await; - let maybe_tx = match tx { - Some(tx) => Some(tx), - None => self - .inner - .read() - .await - .fork_storage - .inner - .read() - .expect("failed reading fork storage") - .fork - .as_ref() - .and_then(|fork| { - fork.fork_source - .get_transaction_by_block_id_and_index(block_id, index) - .ok() - }) - .flatten(), - }; - - Ok(maybe_tx) + .await + { + Some(tx) => Ok(Some(tx)), + None => { + self.fork + .get_transaction_by_block_id_and_index(block_id, index) + .await + } + } } pub fn protocol_version_impl(&self) -> String { @@ -576,7 +522,7 @@ impl InMemoryNode { #[cfg(test)] mod tests { use super::*; - use crate::node::fork::ForkDetails; + use crate::node::fork::{ForkClient, ForkConfig}; use crate::node::TransactionResult; use crate::{ node::{compute_hash, InMemoryNode}, @@ -588,8 +534,8 @@ mod tests { use anvil_zksync_config::constants::{ DEFAULT_ACCOUNT_BALANCE, DEFAULT_L2_GAS_PRICE, NON_FORK_FIRST_BLOCK_TIMESTAMP, }; - use anvil_zksync_config::types::CacheConfig; use maplit::hashmap; + use url::Url; use zksync_multivm::utils::get_max_batch_gas_limit; use zksync_types::l2::TransactionType; use zksync_types::vm::VmVersion; @@ -602,9 +548,9 @@ mod tests { use zksync_types::{u256_to_h256, web3, AccountTreeId, Nonce}; use zksync_web3_decl::types::{SyncState, ValueOrArray}; - async fn test_node(url: &str) -> InMemoryNode { + async fn test_node(url: Url) -> InMemoryNode { InMemoryNode::test(Some( - ForkDetails::from_network(url, None, &CacheConfig::None) + ForkClient::at_block_number(ForkConfig::unknown(url), None) .await .unwrap(), )) @@ -820,7 +766,7 @@ mod tests { transaction_count: 0, }); - let node = 
test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; assert!( node.blockchain .get_block_by_hash(&input_block_hash) @@ -843,20 +789,13 @@ mod tests { let block_response = testing::BlockResponseBuilder::new() .set_hash(input_block_hash) .set_number(mock_block_number) - .build(); + .build_result(); mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockByHash", - "params": [ - format!("{input_block_hash:#x}"), - true - ], - }), + "eth_getBlockByHash", + Some(serde_json::json!([format!("{input_block_hash:#x}"), true])), block_response, ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_block = node .get_block_impl(api::BlockId::Hash(input_block_hash), false) @@ -1002,20 +941,13 @@ mod tests { let mock_block_number = 8; let block_response = testing::BlockResponseBuilder::new() .set_number(mock_block_number) - .build(); + .build_result(); mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockByNumber", - "params": [ - "0x8", - true - ], - }), + "eth_getBlockByNumber", + Some(serde_json::json!(["0x8", true])), block_response, ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_block = node .get_block_impl( @@ -1065,7 +997,7 @@ mod tests { transaction_count: 0, }); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_block = node .get_block_impl(api::BlockId::Number(BlockNumber::Latest), false) @@ -1084,20 +1016,13 @@ mod tests { }); let input_block_number = 1; mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockByNumber", - "params": [ - "earliest", - true - ], - }), + "eth_getBlockByNumber", + Some(serde_json::json!(["earliest", true])), testing::BlockResponseBuilder::new() .set_number(input_block_number) - .build(), + 
.build_result(), ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_block = node .get_block_impl(api::BlockId::Number(BlockNumber::Earliest), false) @@ -1120,7 +1045,7 @@ mod tests { hash: H256::repeat_byte(0xab), transaction_count: 0, }); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_block = node .get_block_impl(api::BlockId::Number(block_number), false) @@ -1160,21 +1085,11 @@ mod tests { let input_block_hash = H256::repeat_byte(0x01); let input_transaction_count = 1; mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockTransactionCountByHash", - "params": [ - format!("{:#x}", input_block_hash), - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "result": format!("{:#x}", input_transaction_count), - }), + "eth_getBlockTransactionCountByHash", + Some(serde_json::json!([format!("{:#x}", input_block_hash)])), + serde_json::json!(format!("{:#x}", input_transaction_count)), ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_transaction_count = node .get_block_transaction_count_impl(api::BlockId::Hash(input_block_hash)) @@ -1214,22 +1129,12 @@ mod tests { let input_block_number = 1; let input_transaction_count = 1; mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockTransactionCountByNumber", - "params": [ - format!("{:#x}", input_block_number), - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "result": format!("{:#x}", input_transaction_count), - }), + "eth_getBlockTransactionCountByNumber", + Some(serde_json::json!([format!("{:#x}", input_block_number)])), + serde_json::json!(format!("{:#x}", input_transaction_count)), ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_transaction_count = node 
.get_block_transaction_count_impl(api::BlockId::Number(BlockNumber::Number(U64::from( @@ -1254,22 +1159,12 @@ mod tests { }); let input_transaction_count = 1; mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getBlockTransactionCountByNumber", - "params": [ - "earliest", - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "result": format!("{:#x}", input_transaction_count), - }), + "eth_getBlockTransactionCountByNumber", + Some(serde_json::json!(["earliest"])), + serde_json::json!(format!("{:#x}", input_transaction_count)), ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_transaction_count = node .get_block_transaction_count_impl(api::BlockId::Number(BlockNumber::Earliest)) @@ -1298,7 +1193,7 @@ mod tests { hash: H256::repeat_byte(0xab), }); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_transaction_count = node .get_block_transaction_count_impl(api::BlockId::Number(block_number)) @@ -1546,24 +1441,16 @@ mod tests { let input_address = H160::repeat_byte(0x1); let input_storage_value = H256::repeat_byte(0xcd); mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getStorageAt", - "params": [ - format!("{:#x}", input_address), - "0x0", - { "blockNumber": "0x2" }, - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "result": format!("{:#x}", input_storage_value), - }), + "eth_getStorageAt", + Some(serde_json::json!([ + format!("{:#x}", input_address), + "0x0", + { "blockNumber": "0x2" }, + ])), + serde_json::json!(format!("{:#x}", input_storage_value)), ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_value = node .get_storage_impl( @@ -1627,7 +1514,8 @@ mod tests { assert_eq!(input_storage_value, actual_value); } - #[tokio::test] + // FIXME: Multi-threaded flavor is needed 
because of the `block_on` mess inside `ForkStorage`. + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_get_storage_uses_fork_to_get_value_for_latest_block_for_missing_key() { let mock_server = MockServer::run_with_config(ForkBlockConfig { number: 10, @@ -1637,24 +1525,16 @@ mod tests { let input_address = H160::repeat_byte(0x1); let input_storage_value = H256::repeat_byte(0xcd); mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getStorageAt", - "params": [ - format!("{:#x}", input_address), - "0x0", - "0xa", - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "result": format!("{:#x}", input_storage_value), - }), + "eth_getStorageAt", + Some(serde_json::json!([ + format!("{:#x}", input_address), + "0x0", + "0xa", + ])), + serde_json::json!(format!("{:#x}", input_storage_value)), ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; { let mut writer = node.inner.write().await; let historical_block = Block:: { @@ -2027,23 +1907,19 @@ mod tests { let input_block_hash = H256::repeat_byte(0x01); let input_tx_hash = H256::repeat_byte(0x02); mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_getTransactionByBlockHashAndIndex", - "params": [ - format!("{:#x}", input_block_hash), - "0x1" - ], - }), + "eth_getTransactionByBlockHashAndIndex", + Some(serde_json::json!([ + format!("{:#x}", input_block_hash), + "0x1" + ])), TransactionResponseBuilder::new() .set_hash(input_tx_hash) .set_block_hash(input_block_hash) .set_block_number(U64::from(100)) - .build(), + .build_result(), ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_tx = node .get_transaction_by_block_and_index_impl( @@ -2124,23 +2000,19 @@ mod tests { let input_block_number = U64::from(100); let input_tx_hash = H256::repeat_byte(0x02); mock_server.expect( - serde_json::json!({ - "jsonrpc": 
"2.0", - "id": 0, - "method": "eth_getTransactionByBlockNumberAndIndex", - "params": [ - format!("{:#x}", input_block_number), - "0x1" - ], - }), + "eth_getTransactionByBlockNumberAndIndex", + Some(serde_json::json!([ + format!("{:#x}", input_block_number), + "0x1" + ])), TransactionResponseBuilder::new() .set_hash(input_tx_hash) .set_block_hash(input_block_hash) .set_block_number(input_block_number) - .build(), + .build_result(), ); - let node = test_node(&mock_server.url()).await; + let node = test_node(mock_server.url()).await; let actual_tx = node .get_transaction_by_block_and_index_impl( diff --git a/crates/core/src/node/fee_model.rs b/crates/core/src/node/fee_model.rs index 97dab926..12d401a4 100644 --- a/crates/core/src/node/fee_model.rs +++ b/crates/core/src/node/fee_model.rs @@ -4,7 +4,7 @@ use zksync_types::fee_model::{ BaseTokenConversionRatio, BatchFeeInput, FeeModelConfigV2, FeeParams, FeeParamsV2, }; -use super::inner::fork::ForkDetails; +use crate::node::fork::ForkDetails; use anvil_zksync_config::constants::{ DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, DEFAULT_FAIR_PUBDATA_PRICE, DEFAULT_L1_GAS_PRICE, DEFAULT_L2_GAS_PRICE, @@ -44,18 +44,11 @@ impl PartialEq for TestNodeFeeInputProvider { impl TestNodeFeeInputProvider { pub fn from_fork(fork: Option<&ForkDetails>) -> Self { if let Some(fork) = fork { - if let Some(params) = fork.fee_params { - TestNodeFeeInputProvider::from_fee_params_and_estimate_scale_factors( - params, - fork.estimate_gas_price_scale_factor, - fork.estimate_gas_scale_factor, - ) - } else { - TestNodeFeeInputProvider::from_estimate_scale_factors( - fork.estimate_gas_price_scale_factor, - fork.estimate_gas_scale_factor, - ) - } + TestNodeFeeInputProvider::from_fee_params_and_estimate_scale_factors( + fork.fee_params, + fork.estimate_gas_price_scale_factor, + fork.estimate_gas_scale_factor, + ) } else { TestNodeFeeInputProvider::default() } diff --git a/crates/core/src/node/in_memory.rs 
b/crates/core/src/node/in_memory.rs index b9fa6782..d19027fa 100644 --- a/crates/core/src/node/in_memory.rs +++ b/crates/core/src/node/in_memory.rs @@ -1,5 +1,4 @@ //! In-memory node, that supports forking other networks. -use super::inner::fork::ForkDetails; use super::inner::node_executor::NodeExecutorHandle; use super::inner::InMemoryNodeInner; use super::vm::AnvilVM; @@ -19,10 +18,8 @@ use crate::node::{BlockSealer, BlockSealerMode, NodeExecutor, TxPool}; use crate::observability::Observability; use crate::system_contracts::SystemContracts; use crate::{delegate_vm, formatter}; -use anvil_zksync_config::constants::{ - LEGACY_RICH_WALLETS, NON_FORK_FIRST_BLOCK_TIMESTAMP, RICH_WALLETS, TEST_NODE_NETWORK_ID, -}; -use anvil_zksync_config::types::Genesis; +use anvil_zksync_config::constants::{NON_FORK_FIRST_BLOCK_TIMESTAMP, TEST_NODE_NETWORK_ID}; +use anvil_zksync_config::types::{CacheConfig, Genesis}; use anvil_zksync_config::TestNodeConfig; use anvil_zksync_types::{LogLevel, ShowCalls, ShowGasDetails, ShowStorageLogs, ShowVMDetails}; use colored::Colorize; @@ -34,7 +31,6 @@ use once_cell::sync::OnceCell; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::io::{Read, Write}; -use std::str::FromStr; use std::sync::Arc; use tokio::sync::RwLock; use zksync_contracts::BaseSystemContracts; @@ -47,6 +43,7 @@ use zksync_multivm::tracers::CallTracer; use zksync_multivm::utils::{get_batch_base_fee, get_max_batch_gas_limit}; use zksync_multivm::vm_latest::Vm; +use crate::node::fork::{ForkClient, ForkSource}; use crate::node::keys::StorageKeyLayout; use zksync_multivm::vm_latest::{HistoryDisabled, ToTracerPointer}; use zksync_multivm::VmVersion; @@ -249,6 +246,7 @@ pub struct InMemoryNode { pub(crate) inner: Arc>, pub(crate) blockchain: Box, pub(crate) storage: Box, + pub(crate) fork: Box, pub(crate) node_handle: NodeExecutorHandle, /// List of snapshots of the [InMemoryNodeInner]. This is bounded at runtime by [MAX_SNAPSHOTS]. 
pub(crate) snapshots: Arc>>, @@ -268,6 +266,7 @@ impl InMemoryNode { inner: Arc>, blockchain: Box, storage: Box, + fork: Box, node_handle: NodeExecutorHandle, observability: Option, time: Box, @@ -281,6 +280,7 @@ impl InMemoryNode { inner, blockchain, storage, + fork, node_handle, snapshots: Default::default(), time, @@ -293,29 +293,6 @@ impl InMemoryNode { } } - pub async fn reset(&self, fork: Option) -> Result<(), String> { - self.inner.write().await.reset(fork).await; - self.snapshots.write().await.clear(); - - for wallet in LEGACY_RICH_WALLETS.iter() { - let address = wallet.0; - self.set_rich_account( - H160::from_str(address).unwrap(), - U256::from(100u128 * 10u128.pow(18)), - ) - .await; - } - for wallet in RICH_WALLETS.iter() { - let address = wallet.0; - self.set_rich_account( - H160::from_str(address).unwrap(), - U256::from(100u128 * 10u128.pow(18)), - ) - .await; - } - Ok(()) - } - /// Applies multiple transactions across multiple blocks. All transactions are expected to be /// executable. Note that on error this method may leave node in partially applied state (i.e. /// some txs have been applied while others have not). 
@@ -645,8 +622,10 @@ pub fn load_last_l1_batch(storage: StoragePtr) -> Option<(u64 // #[cfg(test)] // TODO: Mark with #[cfg(test)] once it is not used in other modules impl InMemoryNode { - pub fn test_config(fork: Option, config: TestNodeConfig) -> Self { - let fee_provider = TestNodeFeeInputProvider::from_fork(fork.as_ref()); + pub fn test_config(fork_client_opt: Option, config: TestNodeConfig) -> Self { + let fee_provider = TestNodeFeeInputProvider::from_fork( + fork_client_opt.as_ref().map(|client| &client.details), + ); let impersonation = ImpersonationManager::default(); let system_contracts = SystemContracts::from_options( &config.system_contracts_options, @@ -658,8 +637,8 @@ impl InMemoryNode { } else { StorageKeyLayout::ZkEra }; - let (inner, storage, blockchain, time) = InMemoryNodeInner::init( - fork, + let (inner, storage, blockchain, time, fork) = InMemoryNodeInner::init( + fork_client_opt, fee_provider, Arc::new(RwLock::new(Default::default())), config, @@ -685,6 +664,7 @@ impl InMemoryNode { inner, blockchain, storage, + fork, node_handle, None, time, @@ -696,8 +676,11 @@ impl InMemoryNode { ) } - pub fn test(fork: Option) -> Self { - let config = TestNodeConfig::default(); - Self::test_config(fork, config) + pub fn test(fork_client_opt: Option) -> Self { + let config = TestNodeConfig { + cache_config: CacheConfig::None, + ..Default::default() + }; + Self::test_config(fork_client_opt, config) } } diff --git a/crates/core/src/node/in_memory_ext.rs b/crates/core/src/node/in_memory_ext.rs index d2c8889f..79ecc1de 100644 --- a/crates/core/src/node/in_memory_ext.rs +++ b/crates/core/src/node/in_memory_ext.rs @@ -1,14 +1,16 @@ -use super::inner::fork::ForkDetails; use super::pool::TxBatch; use super::sealer::BlockSealerMode; use super::InMemoryNode; +use anvil_zksync_config::constants::{LEGACY_RICH_WALLETS, RICH_WALLETS}; use anvil_zksync_types::api::{DetailedTransaction, ResetRequest}; use anyhow::{anyhow, Context}; +use std::str::FromStr; use 
std::time::Duration; +use url::Url; use zksync_types::api::{Block, TransactionVariant}; use zksync_types::bytecode::BytecodeHash; use zksync_types::u256_to_h256; -use zksync_types::{AccountTreeId, Address, L2BlockNumber, StorageKey, H256, U256, U64}; +use zksync_types::{AccountTreeId, Address, L2BlockNumber, StorageKey, H160, H256, U256, U64}; type Result = anyhow::Result; @@ -222,56 +224,48 @@ impl InMemoryNode { } pub async fn reset_network(&self, reset_spec: Option) -> Result { - let (opt_url, block_number) = if let Some(spec) = reset_spec { + if let Some(spec) = reset_spec { if let Some(to) = spec.to { if spec.forking.is_some() { return Err(anyhow!( "Only one of 'to' and 'forking' attributes can be specified" )); } - let url = match self.inner.read().await.fork_storage.get_fork_url() { - Ok(url) => url, - Err(error) => { - tracing::error!("For returning to past local state, mark it with `evm_snapshot`, then revert to it with `evm_revert`."); - return Err(anyhow!(error.to_string())); - } - }; - (Some(url), Some(to.as_u64())) + self.node_handle + .reset_fork_block_number_sync(L2BlockNumber(to.as_u32())) + .await?; } else if let Some(forking) = spec.forking { - let block_number = forking.block_number.map(|n| n.as_u64()); - (Some(forking.json_rpc_url), block_number) + let url = Url::from_str(&forking.json_rpc_url).context("malformed fork URL")?; + let block_number = forking.block_number.map(|n| L2BlockNumber(n.as_u32())); + self.node_handle.reset_fork_sync(url, block_number).await?; } else { - (None, None) + self.node_handle.remove_fork_sync().await?; } } else { - (None, None) - }; + self.node_handle.remove_fork_sync().await?; + } - let fork_details = if let Some(url) = opt_url { - let cache_config = self - .inner - .read() - .await - .fork_storage - .get_cache_config() - .map_err(|err| anyhow!(err))?; - match ForkDetails::from_url(url, block_number, cache_config) { - Ok(fd) => Some(fd), - Err(error) => { - return Err(anyhow!(error.to_string())); - } - } - } else { 
- None - }; + self.snapshots.write().await.clear(); - match self.reset(fork_details).await { - Ok(()) => { - tracing::info!("👷 Network reset"); - Ok(true) - } - Err(error) => Err(anyhow!(error.to_string())), + for wallet in LEGACY_RICH_WALLETS.iter() { + let address = wallet.0; + self.set_rich_account( + H160::from_str(address).unwrap(), + U256::from(100u128 * 10u128.pow(18)), + ) + .await; + } + for wallet in RICH_WALLETS.iter() { + let address = wallet.0; + self.set_rich_account( + H160::from_str(address).unwrap(), + U256::from(100u128 * 10u128.pow(18)), + ) + .await; } + tracing::info!("👷 Network reset"); + + Ok(true) } pub fn auto_impersonate_account(&self, enabled: bool) { @@ -400,20 +394,8 @@ impl InMemoryNode { } pub async fn set_rpc_url(&self, url: String) -> Result<()> { - let inner = self.inner.read().await; - let mut fork_storage = inner - .fork_storage - .inner - .write() - .map_err(|err| anyhow!("failed acquiring lock: {:?}", err))?; - if let Some(fork) = &mut fork_storage.fork { - let old_url = fork.fork_source.get_fork_url().map_err(|e| { - anyhow::anyhow!( - "failed to resolve current fork's RPC URL: {}", - e.to_string() - ) - })?; - fork.set_rpc_url(url.clone()); + let url = Url::from_str(&url).context("malformed fork URL")?; + if let Some(old_url) = self.node_handle.set_fork_url_sync(url.clone()).await? { tracing::info!("Updated fork rpc from \"{}\" to \"{}\"", old_url, url); } else { tracing::info!("Non-forking node tried to switch RPC URL to '{url}'. 
Call `anvil_reset` instead if you wish to switch to forking mode"); diff --git a/crates/core/src/node/inner/blockchain.rs b/crates/core/src/node/inner/blockchain.rs index 003e1ecd..c27ba59a 100644 --- a/crates/core/src/node/inner/blockchain.rs +++ b/crates/core/src/node/inner/blockchain.rs @@ -1,5 +1,5 @@ -use super::fork::ForkDetails; use crate::filters::LogFilter; +use crate::node::inner::fork::ForkDetails; use crate::node::time::{ReadTime, Time}; use crate::node::{compute_hash, create_genesis, create_genesis_from_json, TransactionResult}; use crate::utils::utc_datetime_from_epoch_ms; @@ -472,21 +472,21 @@ impl ReadBlockchain for Blockchain { impl Blockchain { pub(super) fn new( - fork: Option<&ForkDetails>, + fork_details: Option<&ForkDetails>, genesis: Option<&Genesis>, genesis_timestamp: Option, ) -> Blockchain { - let state = if let Some(fork) = fork { + let state = if let Some(fork_details) = fork_details { BlockchainState { - current_batch: fork.l1_block, - current_block: L2BlockNumber(fork.l2_miniblock as u32), - current_block_hash: fork.l2_miniblock_hash, + current_batch: fork_details.batch_number, + current_block: fork_details.block_number, + current_block_hash: fork_details.block_hash, tx_results: Default::default(), - blocks: HashMap::from_iter([(fork.l2_block.hash, fork.l2_block.clone())]), - hashes: HashMap::from_iter([( - fork.l2_block.number.as_u32().into(), - fork.l2_block.hash, + blocks: HashMap::from_iter([( + fork_details.block_hash, + fork_details.api_block.clone(), )]), + hashes: HashMap::from_iter([(fork_details.block_number, fork_details.block_hash)]), } } else { let block_hash = compute_hash(0, []); diff --git a/crates/core/src/node/inner/fork.rs b/crates/core/src/node/inner/fork.rs index 78c58549..acc6cba1 100644 --- a/crates/core/src/node/inner/fork.rs +++ b/crates/core/src/node/inner/fork.rs @@ -1,503 +1,192 @@ -//! This file hold tools used for test-forking other networks. -//! -//! 
There is ForkStorage (that is a wrapper over InMemoryStorage) -//! And ForkDetails - that parses network address and fork height from arguments. - -use crate::node::inner::storage::ReadStorageDyn; -use crate::utils::block_on; -use crate::{deps::InMemoryStorage, http_fork_source::HttpForkSource}; +use crate::cache::Cache; use anvil_zksync_config::constants::{ DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, - DEFAULT_FAIR_PUBDATA_PRICE, TEST_NODE_NETWORK_ID, + DEFAULT_FAIR_PUBDATA_PRICE, }; -use anvil_zksync_config::types::{CacheConfig, SystemContractsOptions}; +use anvil_zksync_config::types::CacheConfig; +use anyhow::Context; use async_trait::async_trait; -use eyre::eyre; -use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; -use std::iter::FromIterator; -use std::{ - collections::HashMap, - convert::{TryFrom, TryInto}, - fmt, - str::FromStr, - sync::{Arc, RwLock}, -}; -use zksync_multivm::interface::storage::ReadStorage; -use zksync_types::api::BlockId; -use zksync_types::web3::Bytes; -use zksync_types::{ - api, - api::{ - Block, BlockDetails, BlockIdVariant, BlockNumber, BridgeAddresses, Transaction, - TransactionDetails, TransactionVariant, - }, - fee_model::FeeParams, - get_system_context_key, - l2::L2Tx, - url::SensitiveUrl, - ProtocolVersionId, StorageKey, SYSTEM_CONTEXT_CHAIN_ID_POSITION, -}; -use zksync_types::{bytecode::BytecodeHash, h256_to_u256}; +use futures::TryFutureExt; +use itertools::Itertools; +use std::fmt; +use std::future::Future; +use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use tracing::Instrument; +use url::Url; +use zksync_types::fee_model::{BaseTokenConversionRatio, FeeModelConfigV2, FeeParams, FeeParamsV2}; +use zksync_types::l2::L2Tx; +use zksync_types::url::SensitiveUrl; +use zksync_types::web3::Index; use zksync_types::{ - Address, L1BatchNumber, L2BlockNumber, L2ChainId, StorageValue, H256, U256, U64, + api, Address, L1BatchNumber, L2BlockNumber, L2ChainId, 
ProtocolVersionId, H256, U256, }; -use zksync_web3_decl::{ - client::{Client, L2}, - namespaces::ZksNamespaceClient, -}; -use zksync_web3_decl::{namespaces::EthNamespaceClient, types::Index}; - -/// The possible networks to fork from. -#[derive(Debug, Clone)] -pub enum ForkNetwork { - Mainnet, - SepoliaTestnet, - GoerliTestnet, - Other(String), -} - -impl ForkNetwork { - /// Return the URL for the underlying fork source. - pub fn to_url(&self) -> &str { - match self { - ForkNetwork::Mainnet => "https://mainnet.era.zksync.io:443", - ForkNetwork::SepoliaTestnet => "https://sepolia.era.zksync.dev:443", - ForkNetwork::GoerliTestnet => "https://testnet.era.zksync.dev:443", - ForkNetwork::Other(url) => url, - } - } - // TODO: This needs to be dynamic based on the network. - /// Returns the local gas scale factors currently in use by the upstream network. - pub fn local_gas_scale_factors(&self) -> (f64, f32) { - match self { - ForkNetwork::Mainnet => (1.5, 1.4), - ForkNetwork::SepoliaTestnet => (2.0, 1.3), - ForkNetwork::GoerliTestnet => (1.2, 1.2), - ForkNetwork::Other(_) => ( - DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, - DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, - ), - } - } -} - -/// In memory storage, that allows 'forking' from other network. -/// If forking is enabled, it reads missing data from remote location. -/// S - is a struct that is used for source of the fork. -#[derive(Debug, Clone)] -pub struct ForkStorage { - pub inner: Arc>, - pub chain_id: L2ChainId, -} - -// TODO: Hide mutable state and mark everything with `pub(super)` -#[derive(Debug)] -pub struct ForkStorageInner { - // Underlying local storage - pub raw_storage: InMemoryStorage, - // Cache of data that was read from remote location. - pub(super) value_read_cache: HashMap, - // Cache of factory deps that were read from remote location. - pub(super) factory_dep_cache: HashMap>>, - // If set - it hold the necessary information on where to fetch the data. 
- // If not set - it will simply read from underlying storage. - pub fork: Option>, -} - -impl ForkStorage { - pub(super) fn new( - fork: Option, - system_contracts_options: &SystemContractsOptions, - use_evm_emulator: bool, - override_chain_id: Option, - ) -> Self { - let chain_id = if let Some(override_id) = override_chain_id { - L2ChainId::from(override_id) - } else { - fork.as_ref() - .and_then(|d| d.overwrite_chain_id) - .unwrap_or(L2ChainId::from(TEST_NODE_NETWORK_ID)) - }; - - ForkStorage { - inner: Arc::new(RwLock::new(ForkStorageInner { - raw_storage: InMemoryStorage::with_system_contracts_and_chain_id( - chain_id, - |b| BytecodeHash::for_bytecode(b).value(), - system_contracts_options, - use_evm_emulator, - ), - value_read_cache: Default::default(), - fork: fork.map(Box::new), - factory_dep_cache: Default::default(), - })), - chain_id, - } - } - - pub fn get_cache_config(&self) -> Result { - let reader = self - .inner - .read() - .map_err(|e| format!("Failed to acquire read lock: {}", e))?; - let cache_config = if let Some(ref fork_details) = reader.fork { - fork_details.cache_config.clone() - } else { - CacheConfig::default() - }; - Ok(cache_config) - } - - pub fn get_fork_url(&self) -> Result { - let reader = self - .inner - .read() - .map_err(|e| format!("Failed to acquire read lock: {}", e))?; - if let Some(ref fork_details) = reader.fork { - fork_details - .fork_source - .get_fork_url() - .map_err(|e| e.to_string()) - } else { - Err("not forked".to_string()) - } - } - - pub fn read_value_internal( - &self, - key: &StorageKey, - ) -> eyre::Result { - let mut mutator = self.inner.write().unwrap(); - let local_storage = mutator.raw_storage.read_value(key); - - if let Some(fork) = &mutator.fork { - if !H256::is_zero(&local_storage) { - return Ok(local_storage); - } - - if let Some(value) = mutator.value_read_cache.get(key) { - return Ok(*value); - } - let l2_miniblock = fork.l2_miniblock; - let key_ = *key; - - let result = 
fork.fork_source.get_storage_at( - *key_.account().address(), - h256_to_u256(*key_.key()), - Some(BlockIdVariant::BlockNumber(BlockNumber::Number(U64::from( - l2_miniblock, - )))), - )?; - - mutator.value_read_cache.insert(*key, result); - Ok(result) - } else { - Ok(local_storage) - } - } - - pub fn load_factory_dep_internal(&self, hash: H256) -> eyre::Result>> { - let mut mutator = self.inner.write().unwrap(); - let local_storage = mutator.raw_storage.load_factory_dep(hash); - if let Some(fork) = &mutator.fork { - if local_storage.is_some() { - return Ok(local_storage); - } - if let Some(value) = mutator.factory_dep_cache.get(&hash) { - return Ok(value.clone()); - } - - let result = fork.fork_source.get_bytecode_by_hash(hash)?; - mutator.factory_dep_cache.insert(hash, result.clone()); - Ok(result) - } else { - Ok(local_storage) - } - } - - /// Check if this is the first time when we're ever writing to this key. - /// This has impact on amount of pubdata that we have to spend for the write. - pub fn is_write_initial_internal(&self, key: &StorageKey) -> eyre::Result { - // Currently we don't have the zks API to return us the information on whether a given - // key was written to before a given block. - // This means, we have to depend on the following heuristic: we'll read the value of the slot. - // - if value != 0 -> this means that the slot was written to in the past (so we can return intitial_write = false) - // - but if the value = 0 - there is a chance, that slot was written to in the past - and later was reset. - // but unfortunately we cannot detect that with the current zks api, so we'll attempt to do it - // only on local storage. - let value = self.read_value_internal(key)?; - if value != H256::zero() { - return Ok(false); - } - - // If value was 0, there is still a chance, that the slot was written to in the past - and only now set to 0. 
- // We unfortunately don't have the API to check it on the fork, but we can at least try to check it on local storage. - let mut mutator = self - .inner - .write() - .map_err(|err| eyre!("failed acquiring write lock on fork storage: {:?}", err))?; - Ok(mutator.raw_storage.is_write_initial(key)) - } - - /// Retrieves the enumeration index for a given `key`. - fn get_enumeration_index_internal(&self, _key: &StorageKey) -> Option { - // TODO: Update this file to use proper enumeration index value once it's exposed for forks via API - Some(0_u64) - } - - /// Creates a serializable representation of current storage state. It will contain both locally - /// stored data and cached data read from the fork. - pub fn dump_state(&self) -> SerializableForkStorage { - let inner = self.inner.read().unwrap(); - let mut state = BTreeMap::from_iter(inner.value_read_cache.clone()); - state.extend(inner.raw_storage.state.clone()); - let mut factory_deps = BTreeMap::from_iter( - inner - .factory_dep_cache - .iter() - // Ignore cache misses - .filter_map(|(k, v)| v.as_ref().map(|v| (k, v))) - .map(|(k, v)| (*k, Bytes::from(v.clone()))), - ); - factory_deps.extend( - inner - .raw_storage - .factory_deps - .iter() - .map(|(k, v)| (*k, Bytes::from(v.clone()))), - ); - - SerializableForkStorage { - storage: SerializableStorage(state), - factory_deps, - } - } - - pub fn load_state(&self, state: SerializableForkStorage) { - tracing::trace!( - slots = state.storage.0.len(), - factory_deps = state.factory_deps.len(), - "loading fork storage from supplied state" - ); - let mut inner = self.inner.write().unwrap(); - inner.raw_storage.state.extend(state.storage.0); - inner - .raw_storage - .factory_deps - .extend(state.factory_deps.into_iter().map(|(k, v)| (k, v.0))); - } -} - -impl ReadStorage for ForkStorage { - fn is_write_initial(&mut self, key: &StorageKey) -> bool { - self.is_write_initial_internal(key).unwrap() - } - - fn load_factory_dep(&mut self, hash: H256) -> Option> { - 
self.load_factory_dep_internal(hash).unwrap() - } - - fn read_value(&mut self, key: &StorageKey) -> zksync_types::StorageValue { - self.read_value_internal(key).unwrap() - } - - fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { - self.get_enumeration_index_internal(key) - } -} - -impl ReadStorage for &ForkStorage { - fn read_value(&mut self, key: &StorageKey) -> zksync_types::StorageValue { - self.read_value_internal(key).unwrap() - } - - fn is_write_initial(&mut self, key: &StorageKey) -> bool { - self.is_write_initial_internal(key).unwrap() - } - - fn load_factory_dep(&mut self, hash: H256) -> Option> { - self.load_factory_dep_internal(hash).unwrap() - } - - fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { - self.get_enumeration_index_internal(key) - } -} - +use zksync_web3_decl::client::{DynClient, L2}; +use zksync_web3_decl::error::Web3Error; +use zksync_web3_decl::namespaces::{EthNamespaceClient, ZksNamespaceClient}; + +/// Trait that provides necessary data when forking a remote chain. +/// +/// Most methods' signatures are similar to corresponding methods from [`EthNamespaceClient`] and [`ZksNamespaceClient`] +/// but with domain-specific types where that makes sense. Additionally, return types are wrapped +/// into [`anyhow::Result`] to avoid leaking RPC-specific implementation details. 
#[async_trait] -impl ReadStorageDyn for ForkStorage { - fn dyn_cloned(&self) -> Box { - Box::new(self.clone()) - } - - async fn read_value_alt(&self, key: &StorageKey) -> anyhow::Result { - // TODO: Get rid of `block_on` inside to propagate asynchronous execution up to this level - self.read_value_internal(key) - .map_err(|e| anyhow::anyhow!("failed reading value: {:?}", e)) - } - - async fn load_factory_dep_alt(&self, hash: H256) -> anyhow::Result>> { - // TODO: Get rid of `block_on` inside to propagate asynchronous execution up to this level - self.load_factory_dep_internal(hash) - .map_err(|e| anyhow::anyhow!("failed to load factory dep: {:?}", e)) - } -} +pub trait ForkSource: fmt::Debug + Send + Sync { + /// Alternative for [`Clone::clone`] that is object safe. + fn dyn_cloned(&self) -> Box; -impl ForkStorage { - pub fn set_value(&self, key: StorageKey, value: zksync_types::StorageValue) { - let mut mutator = self.inner.write().unwrap(); - mutator.raw_storage.set_value(key, value) - } - pub fn store_factory_dep(&self, hash: H256, bytecode: Vec) { - let mut mutator = self.inner.write().unwrap(); - mutator.raw_storage.store_factory_dep(hash, bytecode) - } - pub fn set_chain_id(&mut self, id: L2ChainId) { - self.chain_id = id; - let mut mutator = self.inner.write().unwrap(); - if let Some(fork) = &mut mutator.fork { - fork.set_chain_id(id) - } - mutator.raw_storage.set_value( - get_system_context_key(SYSTEM_CONTEXT_CHAIN_ID_POSITION), - H256::from_low_u64_be(id.as_u64()), - ); - } -} + /// Human-readable description on the fork's origin. None if there is no fork. + fn url(&self) -> Option; -/// Trait that provides necessary data when -/// forking a remote chain. -/// The method signatures are similar to methods from ETHNamespace and ZKNamespace. -pub trait ForkSource { - /// Returns the forked URL. - fn get_fork_url(&self) -> eyre::Result; + /// Details on the fork's state at the moment when we forked from it. None if there is no fork. 
+ fn details(&self) -> Option; - /// Returns the Storage value at a given index for given address. - fn get_storage_at( + /// Fetches fork's storage value at a given index for given address for the forked blocked. + async fn get_storage_at( &self, address: Address, idx: U256, - block: Option, - ) -> eyre::Result; + block: Option, + ) -> anyhow::Result; + + /// Fetches fork's storage value at a given index for given address for the forked blocked. + async fn get_storage_at_forked(&self, address: Address, idx: U256) -> anyhow::Result; /// Returns the bytecode stored under this hash (if available). - fn get_bytecode_by_hash(&self, hash: H256) -> eyre::Result>>; + async fn get_bytecode_by_hash(&self, hash: H256) -> anyhow::Result>>; - /// Returns the transaction for a given hash. - fn get_transaction_by_hash(&self, hash: H256) -> eyre::Result>; + /// Fetches fork's transaction for a given hash. + async fn get_transaction_by_hash(&self, hash: H256) + -> anyhow::Result>; - /// Returns the transaction details for a given hash. - fn get_transaction_details(&self, hash: H256) -> eyre::Result>; + /// Fetches fork's transaction details for a given hash. + async fn get_transaction_details( + &self, + hash: H256, + ) -> anyhow::Result>; - /// Gets all transactions that belong to a given miniblock. - fn get_raw_block_transactions( + /// Fetches fork's transactions that belong to a block with the given number. + async fn get_raw_block_transactions( &self, block_number: L2BlockNumber, - ) -> eyre::Result>; + ) -> anyhow::Result>; - /// Returns the block for a given hash. - fn get_block_by_hash( + /// Fetches fork's block for a given hash. + async fn get_block_by_hash( &self, hash: H256, - full_transactions: bool, - ) -> eyre::Result>>; + ) -> anyhow::Result>>; - /// Returns the block for a given number. - fn get_block_by_number( + /// Fetches fork's block for a given number. 
+ async fn get_block_by_number( &self, - block_number: zksync_types::api::BlockNumber, - full_transactions: bool, - ) -> eyre::Result>>; + block_number: api::BlockNumber, + ) -> anyhow::Result>>; - fn get_block_by_id( + /// Fetches fork's block for a given id. + async fn get_block_by_id( &self, - block_id: zksync_types::api::BlockId, - full_transactions: bool, - ) -> eyre::Result>> { + block_id: api::BlockId, + ) -> anyhow::Result>> { match block_id { - BlockId::Hash(hash) => self.get_block_by_hash(hash, full_transactions), - BlockId::Number(number) => self.get_block_by_number(number, full_transactions), + api::BlockId::Hash(hash) => self.get_block_by_hash(hash).await, + api::BlockId::Number(number) => self.get_block_by_number(number).await, } } - /// Returns the block details for a given miniblock number. - fn get_block_details(&self, miniblock: L2BlockNumber) -> eyre::Result>; - - /// Returns fee parameters for the give source. - fn get_fee_params(&self) -> eyre::Result; + /// Fetches fork's block details for a given block number. + async fn get_block_details( + &self, + block_number: L2BlockNumber, + ) -> anyhow::Result>; - /// Returns the transaction count for a given block hash. - fn get_block_transaction_count_by_hash(&self, block_hash: H256) -> eyre::Result>; + /// Fetches fork's transaction count for a given block hash. + async fn get_block_transaction_count_by_hash( + &self, + block_hash: H256, + ) -> anyhow::Result>; - /// Returns the transaction count for a given block number. - fn get_block_transaction_count_by_number( + /// Fetches fork's transaction count for a given block number. + async fn get_block_transaction_count_by_number( &self, - block_number: zksync_types::api::BlockNumber, - ) -> eyre::Result>; + block_number: api::BlockNumber, + ) -> anyhow::Result>; - fn get_block_transaction_count_by_id( + /// Fetches fork's transaction count for a given block id. 
+ async fn get_block_transaction_count_by_id( &self, block_id: api::BlockId, - ) -> eyre::Result> { + ) -> anyhow::Result> { match block_id { - BlockId::Hash(hash) => self.get_block_transaction_count_by_hash(hash), - BlockId::Number(number) => self.get_block_transaction_count_by_number(number), + api::BlockId::Hash(hash) => self.get_block_transaction_count_by_hash(hash).await, + api::BlockId::Number(number) => { + self.get_block_transaction_count_by_number(number).await + } } } - /// Returns information about a transaction by block hash and transaction index position. - fn get_transaction_by_block_hash_and_index( + /// Fetches fork's transaction by block hash and transaction index position. + async fn get_transaction_by_block_hash_and_index( &self, block_hash: H256, index: Index, - ) -> eyre::Result>; + ) -> anyhow::Result>; - /// Returns information about a transaction by block number and transaction index position. - fn get_transaction_by_block_number_and_index( + /// Fetches fork's transaction by block number and transaction index position. + async fn get_transaction_by_block_number_and_index( &self, - block_number: BlockNumber, + block_number: api::BlockNumber, index: Index, - ) -> eyre::Result>; + ) -> anyhow::Result>; - fn get_transaction_by_block_id_and_index( + /// Fetches fork's transaction by block id and transaction index position. + async fn get_transaction_by_block_id_and_index( &self, block_id: api::BlockId, index: Index, - ) -> eyre::Result> { + ) -> anyhow::Result> { match block_id { - BlockId::Hash(hash) => self.get_transaction_by_block_hash_and_index(hash, index), - BlockId::Number(number) => { + api::BlockId::Hash(hash) => { + self.get_transaction_by_block_hash_and_index(hash, index) + .await + } + api::BlockId::Number(number) => { self.get_transaction_by_block_number_and_index(number, index) + .await } } } - /// Returns addresses of the default bridge contracts. 
- fn get_bridge_contracts(&self) -> eyre::Result; + /// Fetches fork's addresses of the default bridge contracts. + async fn get_bridge_contracts(&self) -> anyhow::Result>; - /// Returns confirmed tokens - fn get_confirmed_tokens( + /// Fetches fork's confirmed tokens. + async fn get_confirmed_tokens( &self, from: u32, limit: u8, - ) -> eyre::Result>; + ) -> anyhow::Result>>; } -/// Holds the information about the original chain. +impl Clone for Box { + fn clone(&self) -> Self { + self.dyn_cloned() + } +} + +#[derive(Debug, Clone)] pub struct ForkDetails { - // Source of the fork data (for example HttpForkSource) - pub fork_source: Box, - // Chain ID of fork + /// Chain ID of the fork. pub chain_id: L2ChainId, - // Block number at which we forked (the next block to create is l1_block + 1) - pub l1_block: L1BatchNumber, - // The actual L2 block - pub l2_block: zksync_types::api::Block, - pub l2_miniblock: u64, - pub l2_miniblock_hash: H256, + /// Batch number at which we forked (the next batch to seal locally is `batch_number + 1`). + pub batch_number: L1BatchNumber, + /// Block number at which we forked (the next block to seal locally is `block_number + 1`). + pub block_number: L2BlockNumber, + /// Block hash at which we forked (corresponds to the hash of block #`block_number`). + pub block_hash: H256, + /// Block timestamp at which we forked (corresponds to the timestamp of block #`block_number`). pub block_timestamp: u64, - pub overwrite_chain_id: Option, + /// API block at which we forked (corresponds to the hash of block #`block_number`). + pub api_block: api::Block, pub l1_gas_price: u64, pub l2_fair_gas_price: u64, // Cost of publishing one byte. @@ -506,120 +195,141 @@ pub struct ForkDetails { pub estimate_gas_price_scale_factor: f64, /// The factor by which to scale the gasLimit. 
pub estimate_gas_scale_factor: f32, - pub fee_params: Option, - pub cache_config: CacheConfig, + pub fee_params: FeeParams, } -const SUPPORTED_VERSIONS: &[ProtocolVersionId] = &[ - ProtocolVersionId::Version9, - ProtocolVersionId::Version10, - ProtocolVersionId::Version11, - ProtocolVersionId::Version12, - ProtocolVersionId::Version13, - ProtocolVersionId::Version14, - ProtocolVersionId::Version15, - ProtocolVersionId::Version16, - ProtocolVersionId::Version17, - ProtocolVersionId::Version18, - ProtocolVersionId::Version19, - ProtocolVersionId::Version20, - ProtocolVersionId::Version21, - ProtocolVersionId::Version22, - ProtocolVersionId::Version23, - ProtocolVersionId::Version24, - ProtocolVersionId::Version25, -]; - -pub fn supported_protocol_versions(version: ProtocolVersionId) -> bool { - SUPPORTED_VERSIONS.contains(&version) +impl Default for ForkDetails { + fn default() -> Self { + let config = FeeModelConfigV2 { + minimal_l2_gas_price: 10_000_000_000, + compute_overhead_part: 0.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 800_000, + max_gas_per_batch: 200_000_000, + max_pubdata_per_batch: 500_000, + }; + Self { + chain_id: Default::default(), + batch_number: Default::default(), + block_number: Default::default(), + block_hash: Default::default(), + block_timestamp: 0, + api_block: Default::default(), + l1_gas_price: 0, + l2_fair_gas_price: 0, + fair_pubdata_price: 0, + estimate_gas_price_scale_factor: 0.0, + estimate_gas_scale_factor: 0.0, + fee_params: FeeParams::V2(FeeParamsV2::new( + config, + 10_000_000_000, + 5_000_000_000, + BaseTokenConversionRatio::default(), + )), + } + } } -pub fn supported_versions_to_string() -> String { - let versions: Vec = SUPPORTED_VERSIONS - .iter() - .map(|v| format!("{:?}", v)) - .collect(); - versions.join(", ") +pub struct ForkConfig { + pub url: Url, + pub estimate_gas_price_scale_factor: f64, + pub estimate_gas_scale_factor: f32, } -impl fmt::Debug for ForkDetails { - fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ForkDetails") - .field("chain_id", &self.chain_id) - .field("l1_block", &self.l1_block) - .field("l2_block", &self.l2_block) - .field("l2_miniblock", &self.l2_miniblock) - .field("l2_miniblock_hash", &self.l2_miniblock_hash) - .field("block_timestamp", &self.block_timestamp) - .field("overwrite_chain_id", &self.overwrite_chain_id) - .field("l1_gas_price", &self.l1_gas_price) - .field("l2_fair_gas_price", &self.l2_fair_gas_price) - .finish() +impl ForkConfig { + /// Default configuration for an unknown chain. + pub fn unknown(url: Url) -> Self { + // TODO: Unfortunately there is no endpoint that exposes this information and there is no + // easy way to derive these values either. Recent releases of zksync-era report unscaled + // open batch's fee input and we should mimic something similar. + let (estimate_gas_price_scale_factor, estimate_gas_scale_factor) = ( + DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, + DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, + ); + Self { + url, + estimate_gas_price_scale_factor, + estimate_gas_scale_factor, + } } } -impl ForkDetails { - pub async fn from_network_and_miniblock_and_chain( - network: ForkNetwork, - client: Client, - miniblock: u64, - chain_id: Option, - cache_config: &CacheConfig, - ) -> eyre::Result { - let url = network.to_url(); - let opt_block_details = client - .get_block_details(L2BlockNumber(miniblock as u32)) +/// Simple wrapper over `eth`/`zks`-capable client that propagates all [`ForkSource`] RPC requests to it. 
+#[derive(Debug, Clone)] +pub struct ForkClient { + pub url: Url, + pub details: ForkDetails, + l2_client: Box>, +} + +impl ForkClient { + async fn new( + config: ForkConfig, + l2_client: Box>, + block_number: L2BlockNumber, + ) -> anyhow::Result { + let ForkConfig { + url, + estimate_gas_price_scale_factor, + estimate_gas_scale_factor, + } = config; + let chain_id = l2_client + .chain_id() .await - .map_err(|error| eyre!(error))?; - let block_details = opt_block_details - .ok_or_else(|| eyre!("Could not find block {:?} in {:?}", miniblock, url))?; + .with_context(|| format!("failed to get chain id from fork={url}"))?; + let chain_id = L2ChainId::try_from(chain_id.as_u64()) + .map_err(|e| anyhow::anyhow!("fork has malformed chain id: {e}"))?; + let block_details = l2_client + .get_block_details(block_number) + .await? + .ok_or_else( + || anyhow::anyhow!("could not find block #{block_number} at fork={url}",), + )?; let root_hash = block_details .base .root_hash - .ok_or_else(|| eyre!("fork block #{} missing root hash", miniblock))?; - let opt_block = client + .ok_or_else(|| anyhow::anyhow!("fork block #{block_number} missing root hash"))?; + let mut block = l2_client .get_block_by_hash(root_hash, true) - .await - .map_err(|error| eyre!(error))?; - let mut block = opt_block.ok_or_else(|| { - eyre!( - "Could not find block #{:?} ({:#x}) in {:?}", - miniblock, - root_hash, - url + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "could not find API block #{block_number} at fork={url} despite finding its details \ + through `zks_getBlockDetails` previously; this is likely a bug, please report this", + ) + })?; + let batch_number = block_details.l1_batch_number; + // TODO: This is a bit weird, we should just grab last block from the latest sealed L1 batch + // instead to ensure `l1BatchNumber` is always present. 
+ block.l1_batch_number = Some(batch_number.0.into()); + + if let Some(protocol_version) = block_details.protocol_version { + // TODO: In reality, anvil-zksync only supports one protocol version as we rely on + // compiled contracts from `contracts` submodule. + if !SupportedProtocolVersions::is_supported(protocol_version) { + anyhow::bail!( + "Block #{block_number} from fork={url} is using unsupported protocol version `{protocol_version}`. \ + anvil-zksync only supports the following versions: {SupportedProtocolVersions}." + ) + } + } else { + // It is possible that some external nodes do not store protocol versions for versions below 9. + // That's why we assume that whenever a protocol version is not present, it is unsupported by anvil-zksync. + anyhow::bail!( + "Block #{block_number} from fork={url} does not have protocol version set. \ + Likely you are using an external node with a block for protocol version below 9 which are unsupported in anvil-zksync. \ + Please report this as a bug if that's not the case." ) - })?; - let l1_batch_number = block_details.l1_batch_number; - block.l1_batch_number = Some(l1_batch_number.0.into()); - - if !block_details - .protocol_version - .is_some_and(supported_protocol_versions) - { - return Err(eyre!("This block is using the unsupported protocol version: {:?}. 
This binary supports versions {}.", - block_details.protocol_version, - supported_versions_to_string())); } - let (estimate_gas_price_scale_factor, estimate_gas_scale_factor) = - network.local_gas_scale_factors(); - let fee_params = match client.get_fee_params().await { - Ok(fp) => Some(fp), - Err(error) => { - tracing::warn!("Cannot get fee params: {:?}", error); - None - } - }; - - Ok(ForkDetails { - fork_source: Box::new(HttpForkSource::new(url.to_owned(), cache_config.clone())), - chain_id: chain_id.unwrap_or_else(|| L2ChainId::from(TEST_NODE_NETWORK_ID)), - l1_block: l1_batch_number, - l2_block: block, + let fee_params = l2_client.get_fee_params().await?; + let details = ForkDetails { + chain_id, + batch_number, + block_number, + block_hash: root_hash, block_timestamp: block_details.base.timestamp, - l2_miniblock: miniblock, - l2_miniblock_hash: root_hash, - overwrite_chain_id: chain_id, + api_block: block, l1_gas_price: block_details.base.l1_gas_price, l2_fair_gas_price: block_details.base.l2_fair_gas_price, fair_pubdata_price: block_details @@ -629,424 +339,713 @@ impl ForkDetails { estimate_gas_price_scale_factor, estimate_gas_scale_factor, fee_params, - cache_config: cache_config.clone(), // TODO: This is a temporary solution, we should avoid cloning the cache config here. We should look to refactor how cache is being configured / used as it currently feels a bit too rigid. See: https://github.com/matter-labs/anvil-zksync/issues/387 - }) + }; + let fork = ForkClient { + url, + details, + l2_client, + }; + Ok(fork) } - /// Create a fork from a given network at a given height. 
- pub async fn from_network( - fork: &str, - fork_block_number: Option, - cache_config: &CacheConfig, - ) -> eyre::Result { - let (network, client) = Self::fork_network_and_client(fork)?; - let chain_id_u64 = client.chain_id().await?; - let chain_id = L2ChainId::from(chain_id_u64.as_u32()); - - let l2_miniblock = if let Some(fork_block_number) = fork_block_number { - fork_block_number + + /// Initializes a fork based on config at a given block number. + pub async fn at_block_number( + config: ForkConfig, + block_number: Option, + ) -> anyhow::Result { + let l2_client = + zksync_web3_decl::client::Client::http(SensitiveUrl::from(config.url.clone()))?.build(); + let block_number = if let Some(block_number) = block_number { + block_number } else { - match client.get_block_number().await { - Ok(bn) => bn.as_u64(), - Err(error) => { - return Err(eyre!(error)); - } - } + let block_number = l2_client + .get_block_number() + .await + .with_context(|| format!("failed to get block number from fork={}", config.url))?; + L2BlockNumber(block_number.as_u32()) }; - Self::from_network_and_miniblock_and_chain( - network, - client, - l2_miniblock, - chain_id.into(), - cache_config, - ) - .await + Self::new(config, Box::new(l2_client), block_number).await } - /// Create a fork from a given network, at a height BEFORE a transaction. + /// Initializes a fork based on config at a block BEFORE given transaction. /// This will allow us to apply this transaction locally on top of this fork. 
- pub async fn from_network_tx( - fork: &str, - tx: H256, - cache_config: &CacheConfig, - ) -> eyre::Result { - let (network, client) = Self::fork_network_and_client(fork)?; - let opt_tx_details = client - .get_transaction_by_hash(tx) - .await - .map_err(|error| eyre!(error))?; - let tx_details = opt_tx_details.ok_or_else(|| eyre!("could not find {:?}", tx))?; - let overwrite_chain_id = L2ChainId::try_from(tx_details.chain_id.as_u64()) - .map_err(|error| eyre!("erroneous chain id {}: {:?}", tx_details.chain_id, error))?; - let block_number = tx_details - .block_number - .ok_or_else(|| eyre!("tx {:?} has no block number", tx))?; - let miniblock_number = L2BlockNumber(block_number.as_u32()); - // We have to sync to the one-miniblock before the one where transaction is. - let l2_miniblock = miniblock_number.saturating_sub(1) as u64; - - Self::from_network_and_miniblock_and_chain( - network, - client, - l2_miniblock, - Some(overwrite_chain_id), - cache_config, - ) - .await + pub async fn at_before_tx( + config: ForkConfig, + tx_hash: H256, + ) -> anyhow::Result<(Self, Vec)> { + let l2_client = + zksync_web3_decl::client::Client::http(SensitiveUrl::from(config.url.clone()))?.build(); + let tx_details = l2_client + .get_transaction_by_hash(tx_hash) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "could not find tx with hash={tx_hash:?} at fork={}", + config.url + ) + })?; + let block_number = tx_details.block_number.ok_or_else(|| { + anyhow::anyhow!( + "could not initialize fork from tx with hash={tx_hash:?} as it is still pending" + ) + })?; + let block_number = L2BlockNumber(block_number.as_u32()); + if block_number == L2BlockNumber(0) { + anyhow::bail!( + "could not initialize fork from tx with hash={tx_hash:?} as it belongs to genesis" + ); + } + let mut earlier_txs = Vec::new(); + for tx in l2_client.get_raw_block_transactions(block_number).await? 
{ + let hash = tx.hash(); + let l2_tx: L2Tx = tx + .try_into() + .map_err(|e| anyhow::anyhow!("Failed to convert to L2 transaction: {e}"))?; + earlier_txs.push(l2_tx); + + if hash == tx_hash { + break; + } + } + + // We initialize fork from the parent of the block containing transaction. + Ok(( + Self::new(config, Box::new(l2_client), block_number - 1).await?, + earlier_txs, + )) } +} - /// Return URL and HTTP client for `hardhat_reset`. - pub fn from_url( - url: String, - fork_block_number: Option, - cache_config: CacheConfig, - ) -> eyre::Result { - let parsed_url = SensitiveUrl::from_str(&url)?; - let builder = Client::http(parsed_url).map_err(|error| eyre!(error))?; - let client = builder.build(); - - block_on(async move { - let chain_id_u64 = client.chain_id().await?; - let chain_id = L2ChainId::from(chain_id_u64.as_u32()); - let l2_miniblock = if let Some(fork_block_number) = fork_block_number { - fork_block_number - } else { - client.get_block_number().await?.as_u64() - }; - - Self::from_network_and_miniblock_and_chain( - ForkNetwork::Other(url), - client, - l2_miniblock, - chain_id.into(), - &cache_config, - ) +impl ForkClient { + pub async fn get_fee_params(&self) -> anyhow::Result { + self.l2_client + .get_fee_params() .await - }) + .with_context(|| format!("failed to get fee parameters from fork={}", self.url)) } +} - /// Return [`ForkNetwork`] and HTTP client for a given fork name. 
- pub fn fork_network_and_client(fork: &str) -> eyre::Result<(ForkNetwork, Client)> { - let network = match fork { - "mainnet" => ForkNetwork::Mainnet, - "sepolia-testnet" => ForkNetwork::SepoliaTestnet, - "goerli-testnet" => ForkNetwork::GoerliTestnet, - _ => ForkNetwork::Other(fork.to_string()), - }; - - let url = network.to_url(); - let parsed_url = SensitiveUrl::from_str(url) - .map_err(|_| eyre!("Unable to parse client URL: {}", &url))?; - let builder = Client::http(parsed_url) - .map_err(|_| eyre!("Unable to create a client for fork: {}", &url))?; - Ok((network, builder.build())) +#[cfg(test)] +impl ForkClient { + pub fn mock(details: ForkDetails, storage: crate::deps::InMemoryStorage) -> Self { + use zksync_types::{u256_to_h256, AccountTreeId, StorageKey, H160}; + + let storage = Arc::new(RwLock::new(storage)); + let storage_clone = storage.clone(); + let l2_client = Box::new( + zksync_web3_decl::client::MockClient::builder(L2::default()) + .method( + "eth_getStorageAt", + move |address: Address, idx: U256, _block: Option| { + let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(idx)); + Ok(storage + .read() + .unwrap() + .state + .get(&key) + .cloned() + .unwrap_or_default()) + }, + ) + .method("zks_getBytecodeByHash", move |hash: H256| { + Ok(storage_clone + .read() + .unwrap() + .factory_deps + .get(&hash) + .cloned()) + }) + .method("zks_getBlockDetails", move |block_number: L2BlockNumber| { + Ok(Some(api::BlockDetails { + number: block_number, + l1_batch_number: L1BatchNumber(123), + base: api::BlockDetailsBase { + timestamp: 0, + l1_tx_count: 0, + l2_tx_count: 0, + root_hash: None, + status: api::BlockStatus::Sealed, + commit_tx_hash: None, + committed_at: None, + commit_chain_id: None, + prove_tx_hash: None, + proven_at: None, + prove_chain_id: None, + execute_tx_hash: None, + executed_at: None, + execute_chain_id: None, + l1_gas_price: 123, + l2_fair_gas_price: 234, + fair_pubdata_price: Some(345), + base_system_contracts_hashes: 
Default::default(), + }, + operator_address: H160::zero(), + protocol_version: None, + })) + }) + .build(), + ); + ForkClient { + url: Url::parse("http://test-fork-in-memory-storage.local").unwrap(), + details, + l2_client, + } } +} - /// Returns transactions that are in the same L2 miniblock as replay_tx, but were executed before it. - pub fn get_earlier_transactions_in_same_block( - &self, - replay_tx: H256, - ) -> eyre::Result> { - let opt_tx_details = self - .fork_source - .get_transaction_by_hash(replay_tx) - .map_err(|err| { - eyre!( - "Cannot get transaction to replay by hash from fork source: {:?}", - err - ) - })?; - let tx_details = - opt_tx_details.ok_or_else(|| eyre!("Cannot find transaction {:?}", replay_tx))?; - let block_number = tx_details - .block_number - .ok_or_else(|| eyre!("Block has no number"))?; - let miniblock = L2BlockNumber(block_number.as_u32()); - - // And we're fetching all the transactions from this miniblock. - let block_transactions = self.fork_source.get_raw_block_transactions(miniblock)?; - - let mut tx_to_apply = Vec::new(); - for tx in block_transactions { - let h = tx.hash(); - let l2_tx: L2Tx = tx.try_into().unwrap(); - tx_to_apply.push(l2_tx); - - if h == replay_tx { - return Ok(tx_to_apply); - } +#[derive(Debug, Clone)] +pub(super) struct Fork { + state: Arc>, +} + +#[derive(Debug)] +struct ForkState { + client: Option, + cache: Cache, +} + +impl Fork { + pub(super) fn new(client: Option, cache_config: CacheConfig) -> Self { + let cache = Cache::new(cache_config); + Self { + state: Arc::new(RwLock::new(ForkState { client, cache })), } - Err(eyre!( - "Cound not find tx {:?} in miniblock: {:?}", - replay_tx, - miniblock - )) } - /// Returns - /// - /// - `l1_gas_price` - /// - `l2_fair_gas_price` - /// - `fair_pubdata_price` - /// - /// for the given l2 block. 
- pub fn get_block_gas_details(&self, miniblock: u32) -> Option<(u64, u64, u64)> { - let res_opt_block_details = self.fork_source.get_block_details(L2BlockNumber(miniblock)); - match res_opt_block_details { - Ok(opt_block_details) => { - if let Some(block_details) = opt_block_details { - if let Some(fair_pubdata_price) = block_details.base.fair_pubdata_price { - Some(( - block_details.base.l1_gas_price, - block_details.base.l2_fair_gas_price, - fair_pubdata_price, - )) - } else { - tracing::warn!( - "Fair pubdata price is not present in {} l2 block details", - miniblock - ); - None - } - } else { - tracing::warn!("No block details for {}", miniblock); - None - } - } - Err(e) => { - tracing::warn!("Error getting block details: {:?}", e); - None - } + pub(super) fn reset_fork_client(&self, client: Option) { + // TODO: We don't clean cache here so it might interfere with the new fork. Consider + // parametrizing cache by fork URL to avoid this. + self.write().client = client; + } + + pub(super) fn set_fork_url(&self, new_url: Url) -> Option { + // We are assuming that the new url is pointing to the same logical data source so we do not + // invalidate cache + let mut writer = self.write(); + if let Some(client) = writer.client.as_mut() { + Some(std::mem::replace(&mut client.url, new_url)) + } else { + None } } - /// Sets fork's internal URL. Assumes the underlying chain is the same as before. - pub fn set_rpc_url(&mut self, url: String) { - self.fork_source = Box::new(HttpForkSource::new(url, self.cache_config.clone())); + fn read(&self) -> RwLockReadGuard { + self.state.read().expect("Fork lock is poisoned") } - // Sets fork's chain id. - pub fn set_chain_id(&mut self, id: L2ChainId) { - self.chain_id = id; - self.overwrite_chain_id = Some(id); + fn write(&self) -> RwLockWriteGuard { + self.state.write().expect("Fork lock is poisoned") } -} -/// Serializable representation of [`ForkStorage`]'s state. 
-#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct SerializableForkStorage { - /// Node's current key-value storage state (contains both local and cached fork data if applicable). - pub storage: SerializableStorage, - /// Factory dependencies by their hash. - pub factory_deps: BTreeMap, + async fn make_call>>( + &self, + method: &str, + call_body: impl FnOnce(Box>) -> F, + ) -> Option> { + let (client, span) = if let Some(client) = self.read().client.as_ref() { + let span = tracing::info_span!("fork_rpc_call", method, url = %client.url); + (client.l2_client.clone(), span) + } else { + return None; + }; + Some( + call_body(client) + .map_err(|error| { + tracing::error!(%error, "call failed"); + error + }) + .instrument(span) + .await, + ) + } } -/// Wrapper for [`BTreeMap`] to avoid serializing [`StorageKey`] as a struct. -/// JSON does not support non-string keys so we use conversion to [`Bytes`] via [`crate::node::state::SerializableStorageKey`] -/// instead. -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde( - into = "BTreeMap", - from = "BTreeMap" -)] -pub struct SerializableStorage(pub BTreeMap); - -mod serde_from { - use super::SerializableStorage; - use serde::{Deserialize, Serialize}; - use std::collections::BTreeMap; - use std::convert::TryFrom; - use zksync_types::web3::Bytes; - use zksync_types::{AccountTreeId, Address, StorageKey, StorageValue, H256}; - - impl From> for SerializableStorage { - fn from(value: BTreeMap) -> Self { - SerializableStorage(value.into_iter().map(|(k, v)| (k.into(), v)).collect()) - } +#[async_trait] +impl ForkSource for Fork { + fn dyn_cloned(&self) -> Box { + Box::new(self.clone()) + } + + fn url(&self) -> Option { + self.read().client.as_ref().map(|client| client.url.clone()) + } + + fn details(&self) -> Option { + self.read() + .client + .as_ref() + .map(|client| client.details.clone()) + } + + async fn get_storage_at( + &self, + address: Address, + idx: U256, + block: Option, + ) -> anyhow::Result { + // 
TODO: This is currently cached at the `ForkStorage` level but I am unsure if this is a + // good thing. Intuitively it feels like cache should be centralized in a single place. + self.make_call("get_storage_at", |client| async move { + client + .get_storage_at(address, idx, block) + .await + .with_context(|| format!("(address={address:?}, idx={idx:?})")) + }) + .await + .unwrap_or(Ok(H256::zero())) + } + + async fn get_storage_at_forked(&self, address: Address, idx: U256) -> anyhow::Result { + let Some(block_number) = self + .read() + .client + .as_ref() + .map(|client| client.details.block_number) + else { + return Ok(H256::zero()); + }; + self.get_storage_at( + address, + idx, + Some(api::BlockIdVariant::BlockNumber(api::BlockNumber::Number( + block_number.0.into(), + ))), + ) + .await } - impl From for BTreeMap { - fn from(value: SerializableStorage) -> Self { - value.0.into_iter().map(|(k, v)| (k.into(), v)).collect() + async fn get_bytecode_by_hash(&self, hash: H256) -> anyhow::Result>> { + // TODO: This is currently cached at the `ForkStorage` level but I am unsure if this is a + // good thing. Intuitively it feels like cache should be centralized in a single place. 
+ self.make_call("get_bytecode_by_hash", |client| async move { + client + .get_bytecode_by_hash(hash) + .await + .with_context(|| format!("(hash={hash:?})")) + }) + .await + .unwrap_or(Ok(None)) + } + + async fn get_transaction_by_hash( + &self, + hash: H256, + ) -> anyhow::Result> { + if let Some(tx) = self.read().cache.get_transaction(&hash).cloned() { + tracing::debug!(?hash, "using cached transaction"); + return Ok(Some(tx)); + } + + let tx = self + .make_call("get_transaction_by_hash", |client| async move { + client + .get_transaction_by_hash(hash) + .await + .with_context(|| format!("(hash={hash:?})")) + }) + .await + .unwrap_or(Ok(None))?; + + if let Some(tx) = tx { + self.write().cache.insert_transaction(hash, tx.clone()); + Ok(Some(tx)) + } else { + Ok(None) } } - #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] - #[serde(into = "Bytes", try_from = "Bytes")] - pub struct SerializableStorageKey(StorageKey); + async fn get_transaction_details( + &self, + hash: H256, + ) -> anyhow::Result> { + // N.B. We don't cache these responses as they will change through the lifecycle of the transaction + // and caching could be error-prone. In theory, we could cache responses once the txn status + // is `final` or `failed` but currently this does not warrant the additional complexity. 
+ self.make_call("get_transaction_details", |client| async move { + client + .get_transaction_details(hash) + .await + .with_context(|| format!("(hash={hash:?})")) + }) + .await + .unwrap_or(Ok(None)) + } - impl From for SerializableStorageKey { - fn from(value: StorageKey) -> Self { - SerializableStorageKey(value) + async fn get_raw_block_transactions( + &self, + block_number: L2BlockNumber, + ) -> anyhow::Result> { + if let Some(txs) = self + .read() + .cache + .get_block_raw_transactions(&(block_number.0 as u64)) + .cloned() + { + tracing::debug!(%block_number, "using cached block raw transactions"); + return Ok(txs); } + + let txs = self + .make_call("get_raw_block_transactions", |client| async move { + client + .get_raw_block_transactions(block_number) + .await + .with_context(|| format!("(block_number={block_number})")) + }) + .await + .unwrap_or(Err(Web3Error::NoBlock.into()))?; + + self.write() + .cache + .insert_block_raw_transactions(block_number.0 as u64, txs.clone()); + Ok(txs) } - impl From for StorageKey { - fn from(value: SerializableStorageKey) -> Self { - value.0 + async fn get_block_by_hash( + &self, + hash: H256, + ) -> anyhow::Result>> { + if let Some(block) = self.read().cache.get_block(&hash, true).cloned() { + tracing::debug!(?hash, "using cached block"); + return Ok(Some(block)); + } + + let block = self + .make_call("get_block_by_hash", |client| async move { + client + .get_block_by_hash(hash, true) + .await + .with_context(|| format!("(hash={hash:?}, full_transactions=true)")) + }) + .await + .unwrap_or(Ok(None))?; + + if let Some(block) = block { + self.write().cache.insert_block(hash, true, block.clone()); + Ok(Some(block)) + } else { + Ok(None) } } - impl TryFrom for SerializableStorageKey { - type Error = anyhow::Error; + async fn get_block_by_number( + &self, + block_number: api::BlockNumber, + ) -> anyhow::Result>> { + match block_number { + api::BlockNumber::Number(block_number) => { + { + let guard = self.read(); + let cache = 
&guard.cache; + if let Some(block) = cache + .get_block_hash(&block_number.as_u64()) + .and_then(|hash| cache.get_block(hash, true)) + .cloned() + { + tracing::debug!(%block_number, "using cached block"); + return Ok(Some(block)); + } + } - fn try_from(bytes: Bytes) -> anyhow::Result { - if bytes.0.len() != 52 { - anyhow::bail!("invalid bytes length (expected 52, got {})", bytes.0.len()) + let block = self + .make_call("get_block_by_number", |client| async move { + client + .get_block_by_number(api::BlockNumber::Number(block_number), true) + .await + .with_context(|| { + format!("(block_number={block_number}, full_transactions=true)") + }) + }) + .await + .unwrap_or(Ok(None))?; + + if let Some(block) = block { + self.write() + .cache + .insert_block(block.hash, true, block.clone()); + Ok(Some(block)) + } else { + Ok(None) + } } - let address = Address::from_slice(&bytes.0[0..20]); - let key = H256::from_slice(&bytes.0[20..52]); - Ok(SerializableStorageKey(StorageKey::new( - AccountTreeId::new(address), - key, - ))) + _ => self + .make_call("get_block_by_number", |client| async move { + client + .get_block_by_number(block_number, true) + .await + .with_context(|| { + format!("(block_number={block_number}, full_transactions=true)") + }) + }) + .await + .unwrap_or(Ok(None)), } } - impl From for Bytes { - fn from(value: SerializableStorageKey) -> Self { - let bytes = [value.0.address().as_bytes(), value.0.key().as_bytes()].concat(); - bytes.into() - } + async fn get_block_details( + &self, + block_number: L2BlockNumber, + ) -> anyhow::Result> { + // N.B. We don't cache these responses as they will change through the lifecycle of the block + // and caching could be error-prone. In theory, we could cache responses once the block + // is finalized but currently this does not warrant the additional complexity. 
+ self.make_call("get_block_details", |client| async move { + client + .get_block_details(block_number) + .await + .with_context(|| format!("(block_number={block_number})")) + }) + .await + .unwrap_or(Ok(None)) } -} -#[cfg(test)] -mod tests { - use super::{ForkDetails, ForkStorage}; - use crate::{deps::InMemoryStorage, testing}; - use anvil_zksync_config::constants::{ - DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, - DEFAULT_FAIR_PUBDATA_PRICE, DEFAULT_L2_GAS_PRICE, TEST_NODE_NETWORK_ID, - }; - use anvil_zksync_config::types::{CacheConfig, SystemContractsOptions}; - use zksync_multivm::interface::storage::ReadStorage; - use zksync_types::{api::TransactionVariant, StorageKey}; - use zksync_types::{ - get_system_context_key, AccountTreeId, L1BatchNumber, L2ChainId, H256, - SYSTEM_CONTEXT_CHAIN_ID_POSITION, - }; - - #[test] - fn test_initial_writes() { - let account = AccountTreeId::default(); - let never_written_key = StorageKey::new(account, H256::from_low_u64_be(1)); - let key_with_some_value = StorageKey::new(account, H256::from_low_u64_be(2)); - let key_with_value_0 = StorageKey::new(account, H256::from_low_u64_be(3)); - let mut in_memory_storage = InMemoryStorage::default(); - in_memory_storage.set_value(key_with_some_value, H256::from_low_u64_be(13)); - in_memory_storage.set_value(key_with_value_0, H256::from_low_u64_be(0)); - - let external_storage = testing::ExternalStorage { - raw_storage: in_memory_storage, - }; + async fn get_block_transaction_count_by_hash( + &self, + block_hash: H256, + ) -> anyhow::Result> { + // TODO: Cache? 
+ self.make_call("get_block_transaction_count_by_hash", |client| async move { + client + .get_block_transaction_count_by_hash(block_hash) + .await + .with_context(|| format!("(block_hash={block_hash:?})")) + }) + .await + .unwrap_or(Ok(None)) + } - let options = SystemContractsOptions::default(); + async fn get_block_transaction_count_by_number( + &self, + block_number: api::BlockNumber, + ) -> anyhow::Result> { + // TODO: Cache? + self.make_call( + "get_block_transaction_count_by_number", + |client| async move { + client + .get_block_transaction_count_by_number(block_number) + .await + .with_context(|| format!("(block_number={block_number})")) + }, + ) + .await + .unwrap_or(Ok(None)) + } - let fork_details = ForkDetails { - fork_source: Box::new(external_storage), - chain_id: TEST_NODE_NETWORK_ID.into(), - l1_block: L1BatchNumber(1), - l2_block: zksync_types::api::Block::::default(), - l2_miniblock: 1, - l2_miniblock_hash: H256::zero(), - block_timestamp: 0, - overwrite_chain_id: None, - l1_gas_price: 100, - l2_fair_gas_price: DEFAULT_L2_GAS_PRICE, - fair_pubdata_price: DEFAULT_FAIR_PUBDATA_PRICE, - estimate_gas_price_scale_factor: DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, - estimate_gas_scale_factor: DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, - fee_params: None, - cache_config: CacheConfig::None, - }; + async fn get_transaction_by_block_hash_and_index( + &self, + block_hash: H256, + index: Index, + ) -> anyhow::Result> { + // TODO: Cache? + self.make_call( + "get_transaction_by_block_hash_and_index", + |client| async move { + client + .get_transaction_by_block_hash_and_index(block_hash, index) + .await + .with_context(|| format!("(block_hash={block_hash:?}, index={index})")) + }, + ) + .await + .unwrap_or(Ok(None)) + } - let mut fork_storage: ForkStorage = - ForkStorage::new(Some(fork_details), &options, false, None); + async fn get_transaction_by_block_number_and_index( + &self, + block_number: api::BlockNumber, + index: Index, + ) -> anyhow::Result> { + // TODO: Cache? 
+ self.make_call( + "get_transaction_by_block_number_and_index", + |client| async move { + client + .get_transaction_by_block_number_and_index(block_number, index) + .await + .with_context(|| format!("(block_number={block_number}, index={index})")) + }, + ) + .await + .unwrap_or(Ok(None)) + } - assert!(fork_storage.is_write_initial(&never_written_key)); - assert!(!fork_storage.is_write_initial(&key_with_some_value)); - // This is the current limitation of the system. In theory, this should return false - as the value was written, but we don't have the API to the - // backend to get this information. - assert!(fork_storage.is_write_initial(&key_with_value_0)); + async fn get_bridge_contracts(&self) -> anyhow::Result> { + if let Some(bridge_contracts) = self.read().cache.get_bridge_addresses().cloned() { + tracing::debug!("using cached bridge contracts"); + return Ok(Some(bridge_contracts)); + } + + let bridge_contracts = self + .make_call("get_bridge_contracts", |client| async move { + Ok(Some(client.get_bridge_contracts().await?)) + }) + .await + .unwrap_or(Ok(None))?; - // But writing any value there in the local storage (even 0) - should make it non-initial write immediately. 
- fork_storage.set_value(key_with_value_0, H256::zero()); - assert!(!fork_storage.is_write_initial(&key_with_value_0)); + if let Some(bridge_contracts) = bridge_contracts { + self.write() + .cache + .set_bridge_addresses(bridge_contracts.clone()); + Ok(Some(bridge_contracts)) + } else { + Ok(None) + } } - #[test] - fn test_get_block_gas_details() { - let fork_details = ForkDetails { - fork_source: Box::new(testing::ExternalStorage { - raw_storage: InMemoryStorage::default(), - }), - chain_id: TEST_NODE_NETWORK_ID.into(), - l1_block: L1BatchNumber(0), - l2_block: zksync_types::api::Block::::default(), - l2_miniblock: 0, - l2_miniblock_hash: H256::zero(), - block_timestamp: 0, - overwrite_chain_id: None, - l1_gas_price: 0, - l2_fair_gas_price: 0, - fair_pubdata_price: 0, - estimate_gas_price_scale_factor: 0.0, - estimate_gas_scale_factor: 0.0, - fee_params: None, - cache_config: CacheConfig::None, - }; + async fn get_confirmed_tokens( + &self, + from: u32, + limit: u8, + ) -> anyhow::Result>> { + if let Some(confirmed_tokens) = self.read().cache.get_confirmed_tokens(from, limit).cloned() + { + tracing::debug!(from, limit, "using cached confirmed tokens"); + return Ok(Some(confirmed_tokens)); + } - let actual_result = fork_details.get_block_gas_details(1); - let expected_result = Some((123, 234, 345)); + let confirmed_tokens = self + .make_call("get_block_details", |client| async move { + Ok(Some( + client + .get_confirmed_tokens(from, limit) + .await + .with_context(|| format!("(from={from}, limit={limit})"))?, + )) + }) + .await + .unwrap_or(Ok(None))?; - assert_eq!(actual_result, expected_result); + if let Some(confirmed_tokens) = confirmed_tokens { + self.write() + .cache + .set_confirmed_tokens(from, limit, confirmed_tokens.clone()); + Ok(Some(confirmed_tokens)) + } else { + Ok(None) + } } +} - #[test] - fn test_fork_storage_set_chain_id() { - let fork_details = ForkDetails { - fork_source: Box::new(testing::ExternalStorage { - raw_storage: 
InMemoryStorage::default(), - }), - chain_id: TEST_NODE_NETWORK_ID.into(), - l1_block: L1BatchNumber(0), - l2_block: zksync_types::api::Block::::default(), - l2_miniblock: 0, - l2_miniblock_hash: H256::zero(), - block_timestamp: 0, - overwrite_chain_id: None, - l1_gas_price: 0, - l2_fair_gas_price: 0, - fair_pubdata_price: 0, - estimate_gas_price_scale_factor: 0.0, - estimate_gas_scale_factor: 0.0, - fee_params: None, - cache_config: CacheConfig::None, - }; - let mut fork_storage: ForkStorage = ForkStorage::new( - Some(fork_details), - &SystemContractsOptions::default(), - false, - None, - ); - let new_chain_id = L2ChainId::from(261); - fork_storage.set_chain_id(new_chain_id); +struct SupportedProtocolVersions; + +impl SupportedProtocolVersions { + const SUPPORTED_VERSIONS: [ProtocolVersionId; 17] = [ + ProtocolVersionId::Version9, + ProtocolVersionId::Version10, + ProtocolVersionId::Version11, + ProtocolVersionId::Version12, + ProtocolVersionId::Version13, + ProtocolVersionId::Version14, + ProtocolVersionId::Version15, + ProtocolVersionId::Version16, + ProtocolVersionId::Version17, + ProtocolVersionId::Version18, + ProtocolVersionId::Version19, + ProtocolVersionId::Version20, + ProtocolVersionId::Version21, + ProtocolVersionId::Version22, + ProtocolVersionId::Version23, + ProtocolVersionId::Version24, + ProtocolVersionId::Version25, + ]; + + fn is_supported(version: ProtocolVersionId) -> bool { + Self::SUPPORTED_VERSIONS.contains(&version) + } +} - let inner = fork_storage.inner.read().unwrap(); +impl fmt::Display for SupportedProtocolVersions { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str( + &Self::SUPPORTED_VERSIONS + .iter() + .map(|v| v.to_string()) + .join(", "), + ) + } +} - assert_eq!(new_chain_id, fork_storage.chain_id); - assert_eq!( - new_chain_id, - inner.fork.as_ref().map(|f| f.chain_id).unwrap() +#[cfg(test)] +mod test { + use super::*; + use crate::deps::InMemoryStorage; + use maplit::hashmap; + use 
zksync_types::block::{pack_block_info, unpack_block_info}; + use zksync_types::{h256_to_u256, u256_to_h256, AccountTreeId, StorageKey}; + + #[tokio::test] + async fn test_mock_client() { + let input_batch = 1; + let input_l2_block = 2; + let input_timestamp = 3; + let input_bytecode = vec![0x4]; + let batch_key = StorageKey::new( + AccountTreeId::new(zksync_types::SYSTEM_CONTEXT_ADDRESS), + zksync_types::SYSTEM_CONTEXT_BLOCK_INFO_POSITION, ); - assert_eq!( - H256::from_low_u64_be(new_chain_id.as_u64()), - *inner - .raw_storage - .state - .get(&get_system_context_key(SYSTEM_CONTEXT_CHAIN_ID_POSITION)) - .unwrap() + let l2_block_key = StorageKey::new( + AccountTreeId::new(zksync_types::SYSTEM_CONTEXT_ADDRESS), + zksync_types::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, ); + + let client = ForkClient::mock( + ForkDetails::default(), + InMemoryStorage { + state: hashmap! { + batch_key => u256_to_h256(U256::from(input_batch)), + l2_block_key => u256_to_h256(pack_block_info( + input_l2_block, + input_timestamp, + )) + }, + factory_deps: hashmap! 
{ + H256::repeat_byte(0x1) => input_bytecode.clone(), + }, + }, + ); + let fork = Fork::new(Some(client), CacheConfig::None); + + let actual_batch = fork + .get_storage_at( + zksync_types::SYSTEM_CONTEXT_ADDRESS, + h256_to_u256(zksync_types::SYSTEM_CONTEXT_BLOCK_INFO_POSITION), + None, + ) + .await + .map(|value| h256_to_u256(value).as_u64()) + .expect("failed getting batch number"); + assert_eq!(input_batch, actual_batch); + + let (actual_l2_block, actual_timestamp) = fork + .get_storage_at( + zksync_types::SYSTEM_CONTEXT_ADDRESS, + h256_to_u256(zksync_types::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION), + None, + ) + .await + .map(|value| unpack_block_info(h256_to_u256(value))) + .expect("failed getting l2 block info"); + assert_eq!(input_l2_block, actual_l2_block); + assert_eq!(input_timestamp, actual_timestamp); + + let zero_missing_value = fork + .get_storage_at( + zksync_types::SYSTEM_CONTEXT_ADDRESS, + h256_to_u256(H256::repeat_byte(0x1e)), + None, + ) + .await + .map(|value| h256_to_u256(value).as_u64()) + .expect("failed missing value"); + assert_eq!(0, zero_missing_value); + + let actual_bytecode = fork + .get_bytecode_by_hash(H256::repeat_byte(0x1)) + .await + .expect("failed getting bytecode") + .expect("missing bytecode"); + assert_eq!(input_bytecode, actual_bytecode); } } diff --git a/crates/core/src/node/inner/fork_storage.rs b/crates/core/src/node/inner/fork_storage.rs new file mode 100644 index 00000000..be8baeaf --- /dev/null +++ b/crates/core/src/node/inner/fork_storage.rs @@ -0,0 +1,440 @@ +//! This file hold tools used for test-forking other networks. +//! +//! There is ForkStorage (that is a wrapper over InMemoryStorage) +//! And ForkDetails - that parses network address and fork height from arguments. 
+ +use crate::deps::InMemoryStorage; +use crate::node::inner::fork::{Fork, ForkSource}; +use crate::node::inner::storage::ReadStorageDyn; +use crate::utils; +use anvil_zksync_config::constants::TEST_NODE_NETWORK_ID; +use anvil_zksync_config::types::SystemContractsOptions; +use async_trait::async_trait; +use eyre::eyre; +use serde::{Deserialize, Serialize}; +use std::collections::{BTreeMap, HashMap}; +use std::iter::FromIterator; +use std::sync::{Arc, RwLock}; +use zksync_multivm::interface::storage::ReadStorage; +use zksync_types::bytecode::BytecodeHash; +use zksync_types::web3::Bytes; +use zksync_types::{ + get_system_context_key, h256_to_u256, L2ChainId, StorageKey, StorageValue, H256, + SYSTEM_CONTEXT_CHAIN_ID_POSITION, +}; + +/// In memory storage, that allows 'forking' from other network. +/// If forking is enabled, it reads missing data from remote location. +#[derive(Debug, Clone)] +pub struct ForkStorage { + pub inner: Arc>, + pub chain_id: L2ChainId, +} + +// TODO: Hide mutable state and mark everything with `pub(super)` +#[derive(Debug)] +pub struct ForkStorageInner { + // Underlying local storage + pub raw_storage: InMemoryStorage, + // Cache of data that was read from remote location. + pub(super) value_read_cache: HashMap, + // Cache of factory deps that were read from remote location. + pub(super) factory_dep_cache: HashMap>>, + // If set - it hold the necessary information on where to fetch the data. + // If not set - it will simply read from underlying storage. 
+ fork: Fork, +} + +impl ForkStorage { + pub(super) fn new( + fork: Fork, + system_contracts_options: &SystemContractsOptions, + use_evm_emulator: bool, + override_chain_id: Option, + ) -> Self { + let chain_id = if let Some(override_id) = override_chain_id { + L2ChainId::from(override_id) + } else { + fork.details() + .map(|fd| fd.chain_id) + .unwrap_or(L2ChainId::from(TEST_NODE_NETWORK_ID)) + }; + + ForkStorage { + inner: Arc::new(RwLock::new(ForkStorageInner { + raw_storage: InMemoryStorage::with_system_contracts_and_chain_id( + chain_id, + |b| BytecodeHash::for_bytecode(b).value(), + system_contracts_options, + use_evm_emulator, + ), + value_read_cache: Default::default(), + fork, + factory_dep_cache: Default::default(), + })), + chain_id, + } + } + + pub fn read_value_internal(&self, key: &StorageKey) -> eyre::Result { + let fork = { + let mut writer = self.inner.write().unwrap(); + let local_storage = writer.raw_storage.read_value(key); + if local_storage != H256::zero() { + return Ok(local_storage); + } + if let Some(value) = writer.value_read_cache.get(key) { + return Ok(*value); + } + writer.fork.clone() + }; + let address = *key.account().address(); + let idx = h256_to_u256(*key.key()); + let value = + utils::block_on(async move { fork.get_storage_at_forked(address, idx).await }).unwrap(); + + let mut writer = self.inner.write().unwrap(); + writer.value_read_cache.insert(*key, value); + Ok(value) + } + + pub fn load_factory_dep_internal(&self, hash: H256) -> eyre::Result>> { + let fork = { + let mut writer = self.inner.write().unwrap(); + let local_storage = writer.raw_storage.load_factory_dep(hash); + if local_storage.is_some() { + return Ok(local_storage); + } + if let Some(value) = writer.factory_dep_cache.get(&hash) { + return Ok(value.clone()); + } + writer.fork.clone() + }; + + let result = utils::block_on(async move { fork.get_bytecode_by_hash(hash).await }).unwrap(); + + let mut writer = self.inner.write().unwrap(); + 
writer.factory_dep_cache.insert(hash, result.clone()); + Ok(result) + } + + /// Check if this is the first time when we're ever writing to this key. + /// This has impact on amount of pubdata that we have to spend for the write. + pub fn is_write_initial_internal(&self, key: &StorageKey) -> eyre::Result { + // Currently we don't have the zks API to return us the information on whether a given + // key was written to before a given block. + // This means, we have to depend on the following heuristic: we'll read the value of the slot. + // - if value != 0 -> this means that the slot was written to in the past (so we can return intitial_write = false) + // - but if the value = 0 - there is a chance, that slot was written to in the past - and later was reset. + // but unfortunately we cannot detect that with the current zks api, so we'll attempt to do it + // only on local storage. + let value = self.read_value_internal(key)?; + if value != H256::zero() { + return Ok(false); + } + + // If value was 0, there is still a chance, that the slot was written to in the past - and only now set to 0. + // We unfortunately don't have the API to check it on the fork, but we can at least try to check it on local storage. + let mut mutator = self + .inner + .write() + .map_err(|err| eyre!("failed acquiring write lock on fork storage: {:?}", err))?; + Ok(mutator.raw_storage.is_write_initial(key)) + } + + /// Retrieves the enumeration index for a given `key`. + fn get_enumeration_index_internal(&self, _key: &StorageKey) -> Option { + // TODO: Update this file to use proper enumeration index value once it's exposed for forks via API + Some(0_u64) + } + + /// Creates a serializable representation of current storage state. It will contain both locally + /// stored data and cached data read from the fork. 
+ pub fn dump_state(&self) -> SerializableForkStorage { + let inner = self.inner.read().unwrap(); + let mut state = BTreeMap::from_iter(inner.value_read_cache.clone()); + state.extend(inner.raw_storage.state.clone()); + let mut factory_deps = BTreeMap::from_iter( + inner + .factory_dep_cache + .iter() + // Ignore cache misses + .filter_map(|(k, v)| v.as_ref().map(|v| (k, v))) + .map(|(k, v)| (*k, Bytes::from(v.clone()))), + ); + factory_deps.extend( + inner + .raw_storage + .factory_deps + .iter() + .map(|(k, v)| (*k, Bytes::from(v.clone()))), + ); + + SerializableForkStorage { + storage: SerializableStorage(state), + factory_deps, + } + } + + pub fn load_state(&self, state: SerializableForkStorage) { + tracing::trace!( + slots = state.storage.0.len(), + factory_deps = state.factory_deps.len(), + "loading fork storage from supplied state" + ); + let mut inner = self.inner.write().unwrap(); + inner.raw_storage.state.extend(state.storage.0); + inner + .raw_storage + .factory_deps + .extend(state.factory_deps.into_iter().map(|(k, v)| (k, v.0))); + } +} + +impl ReadStorage for ForkStorage { + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.is_write_initial_internal(key).unwrap() + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.load_factory_dep_internal(hash).unwrap() + } + + fn read_value(&mut self, key: &StorageKey) -> zksync_types::StorageValue { + self.read_value_internal(key).unwrap() + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + self.get_enumeration_index_internal(key) + } +} + +impl ReadStorage for &ForkStorage { + fn read_value(&mut self, key: &StorageKey) -> zksync_types::StorageValue { + self.read_value_internal(key).unwrap() + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + self.is_write_initial_internal(key).unwrap() + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.load_factory_dep_internal(hash).unwrap() + } + + fn get_enumeration_index(&mut self, 
key: &StorageKey) -> Option { + self.get_enumeration_index_internal(key) + } +} + +#[async_trait] +impl ReadStorageDyn for ForkStorage { + fn dyn_cloned(&self) -> Box { + Box::new(self.clone()) + } + + async fn read_value_alt(&self, key: &StorageKey) -> anyhow::Result { + // TODO: Get rid of `block_on` inside to propagate asynchronous execution up to this level + self.read_value_internal(key) + .map_err(|e| anyhow::anyhow!("failed reading value: {:?}", e)) + } + + async fn load_factory_dep_alt(&self, hash: H256) -> anyhow::Result>> { + // TODO: Get rid of `block_on` inside to propagate asynchronous execution up to this level + self.load_factory_dep_internal(hash) + .map_err(|e| anyhow::anyhow!("failed to load factory dep: {:?}", e)) + } +} + +impl ForkStorage { + pub fn set_value(&self, key: StorageKey, value: zksync_types::StorageValue) { + let mut mutator = self.inner.write().unwrap(); + mutator.raw_storage.set_value(key, value) + } + pub fn store_factory_dep(&self, hash: H256, bytecode: Vec) { + let mut mutator = self.inner.write().unwrap(); + mutator.raw_storage.store_factory_dep(hash, bytecode) + } + pub fn set_chain_id(&mut self, id: L2ChainId) { + self.chain_id = id; + let mut mutator = self.inner.write().unwrap(); + mutator.raw_storage.set_value( + get_system_context_key(SYSTEM_CONTEXT_CHAIN_ID_POSITION), + H256::from_low_u64_be(id.as_u64()), + ); + } +} + +/// Serializable representation of [`ForkStorage`]'s state. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct SerializableForkStorage { + /// Node's current key-value storage state (contains both local and cached fork data if applicable). + pub storage: SerializableStorage, + /// Factory dependencies by their hash. + pub factory_deps: BTreeMap, +} + +/// Wrapper for [`BTreeMap`] to avoid serializing [`StorageKey`] as a struct. +/// JSON does not support non-string keys so we use conversion to [`Bytes`] via [`crate::node::state::SerializableStorageKey`] +/// instead. 
+#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde( + into = "BTreeMap", + from = "BTreeMap" +)] +pub struct SerializableStorage(pub BTreeMap); + +mod serde_from { + use super::SerializableStorage; + use serde::{Deserialize, Serialize}; + use std::collections::BTreeMap; + use std::convert::TryFrom; + use zksync_types::web3::Bytes; + use zksync_types::{AccountTreeId, Address, StorageKey, StorageValue, H256}; + + impl From> for SerializableStorage { + fn from(value: BTreeMap) -> Self { + SerializableStorage(value.into_iter().map(|(k, v)| (k.into(), v)).collect()) + } + } + + impl From for BTreeMap { + fn from(value: SerializableStorage) -> Self { + value.0.into_iter().map(|(k, v)| (k.into(), v)).collect() + } + } + + #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] + #[serde(into = "Bytes", try_from = "Bytes")] + pub struct SerializableStorageKey(StorageKey); + + impl From for SerializableStorageKey { + fn from(value: StorageKey) -> Self { + SerializableStorageKey(value) + } + } + + impl From for StorageKey { + fn from(value: SerializableStorageKey) -> Self { + value.0 + } + } + + impl TryFrom for SerializableStorageKey { + type Error = anyhow::Error; + + fn try_from(bytes: Bytes) -> anyhow::Result { + if bytes.0.len() != 52 { + anyhow::bail!("invalid bytes length (expected 52, got {})", bytes.0.len()) + } + let address = Address::from_slice(&bytes.0[0..20]); + let key = H256::from_slice(&bytes.0[20..52]); + Ok(SerializableStorageKey(StorageKey::new( + AccountTreeId::new(address), + key, + ))) + } + } + + impl From for Bytes { + fn from(value: SerializableStorageKey) -> Self { + let bytes = [value.0.address().as_bytes(), value.0.key().as_bytes()].concat(); + bytes.into() + } + } +} + +#[cfg(test)] +mod tests { + use super::ForkStorage; + use crate::deps::InMemoryStorage; + use crate::node::fork::{Fork, ForkClient, ForkDetails}; + use anvil_zksync_config::constants::{ + DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, 
DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, + DEFAULT_FAIR_PUBDATA_PRICE, DEFAULT_L2_GAS_PRICE, TEST_NODE_NETWORK_ID, + }; + use anvil_zksync_config::types::{CacheConfig, SystemContractsOptions}; + use zksync_multivm::interface::storage::ReadStorage; + use zksync_types::{api::TransactionVariant, L2BlockNumber, StorageKey}; + use zksync_types::{ + get_system_context_key, AccountTreeId, L1BatchNumber, L2ChainId, H256, + SYSTEM_CONTEXT_CHAIN_ID_POSITION, + }; + + #[test] + fn test_initial_writes() { + let account = AccountTreeId::default(); + let never_written_key = StorageKey::new(account, H256::from_low_u64_be(1)); + let key_with_some_value = StorageKey::new(account, H256::from_low_u64_be(2)); + let key_with_value_0 = StorageKey::new(account, H256::from_low_u64_be(3)); + let mut in_memory_storage = InMemoryStorage::default(); + in_memory_storage.set_value(key_with_some_value, H256::from_low_u64_be(13)); + in_memory_storage.set_value(key_with_value_0, H256::from_low_u64_be(0)); + + let fork_details = ForkDetails { + chain_id: TEST_NODE_NETWORK_ID.into(), + batch_number: L1BatchNumber(1), + block_number: L2BlockNumber(1), + block_hash: H256::zero(), + block_timestamp: 0, + api_block: zksync_types::api::Block::::default(), + l1_gas_price: 100, + l2_fair_gas_price: DEFAULT_L2_GAS_PRICE, + fair_pubdata_price: DEFAULT_FAIR_PUBDATA_PRICE, + estimate_gas_price_scale_factor: DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, + estimate_gas_scale_factor: DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, + ..Default::default() + }; + let client = ForkClient::mock(fork_details, in_memory_storage); + let fork = Fork::new(Some(client), CacheConfig::None); + + let options = SystemContractsOptions::default(); + let mut fork_storage: ForkStorage = ForkStorage::new(fork, &options, false, None); + + assert!(fork_storage.is_write_initial(&never_written_key)); + assert!(!fork_storage.is_write_initial(&key_with_some_value)); + // This is the current limitation of the system. 
In theory, this should return false - as the value was written, but we don't have the API to the + // backend to get this information. + assert!(fork_storage.is_write_initial(&key_with_value_0)); + + // But writing any value there in the local storage (even 0) - should make it non-initial write immediately. + fork_storage.set_value(key_with_value_0, H256::zero()); + assert!(!fork_storage.is_write_initial(&key_with_value_0)); + } + + #[test] + fn test_fork_storage_set_chain_id() { + let fork_details = ForkDetails { + chain_id: TEST_NODE_NETWORK_ID.into(), + batch_number: L1BatchNumber(0), + block_number: L2BlockNumber(0), + block_hash: H256::zero(), + block_timestamp: 0, + api_block: zksync_types::api::Block::::default(), + l1_gas_price: 0, + l2_fair_gas_price: 0, + fair_pubdata_price: 0, + estimate_gas_price_scale_factor: 0.0, + estimate_gas_scale_factor: 0.0, + ..Default::default() + }; + let client = ForkClient::mock(fork_details, InMemoryStorage::default()); + let fork = Fork::new(Some(client), CacheConfig::None); + let mut fork_storage: ForkStorage = + ForkStorage::new(fork, &SystemContractsOptions::default(), false, None); + let new_chain_id = L2ChainId::from(261); + fork_storage.set_chain_id(new_chain_id); + + let inner = fork_storage.inner.read().unwrap(); + + assert_eq!(new_chain_id, fork_storage.chain_id); + assert_eq!( + H256::from_low_u64_be(new_chain_id.as_u64()), + *inner + .raw_storage + .state + .get(&get_system_context_key(SYSTEM_CONTEXT_CHAIN_ID_POSITION)) + .unwrap() + ); + } +} diff --git a/crates/core/src/node/inner/in_memory_inner.rs b/crates/core/src/node/inner/in_memory_inner.rs index 7b0d0e11..fa5c3a16 100644 --- a/crates/core/src/node/inner/in_memory_inner.rs +++ b/crates/core/src/node/inner/in_memory_inner.rs @@ -1,12 +1,13 @@ -use super::blockchain::{Blockchain, ReadBlockchain}; -use super::fork::{ForkDetails, ForkStorage, SerializableStorage}; -use super::time::Time; use crate::bootloader_debug::{BootloaderDebug, BootloaderDebugTracer}; 
use crate::console_log::ConsoleLogHandler; use crate::deps::storage_view::StorageView; use crate::filters::EthFilters; use crate::node::call_error_tracer::CallErrorTracer; use crate::node::error::LoadStateError; +use crate::node::inner::blockchain::{Blockchain, ReadBlockchain}; +use crate::node::inner::fork::{Fork, ForkClient, ForkSource}; +use crate::node::inner::fork_storage::{ForkStorage, SerializableStorage}; +use crate::node::inner::time::Time; use crate::node::keys::StorageKeyLayout; use crate::node::state::StateV1; use crate::node::storage_logs::print_storage_logs_details; @@ -82,6 +83,7 @@ pub struct InMemoryNodeInner { // TODO: Make private // Underlying storage pub fork_storage: ForkStorage, + pub(super) fork: Fork, // Configuration. pub config: TestNodeConfig, pub console_log_handler: ConsoleLogHandler, @@ -100,6 +102,7 @@ impl InMemoryNodeInner { blockchain: Blockchain, time: Time, fork_storage: ForkStorage, + fork: Fork, fee_input_provider: TestNodeFeeInputProvider, filters: Arc>, config: TestNodeConfig, @@ -113,6 +116,7 @@ impl InMemoryNodeInner { fee_input_provider, filters, fork_storage, + fork, config, console_log_handler: ConsoleLogHandler::default(), system_contracts, @@ -160,17 +164,13 @@ impl InMemoryNodeInner { timestamp: self.time.peek_next_timestamp(), }; - let fee_input = if let Some(fork) = &self - .fork_storage - .inner - .read() - .expect("fork_storage lock is already held by the current thread") - .fork - { + let fee_input = if let Some(fork_details) = self.fork.details() { + // TODO: This is a weird pattern. `TestNodeFeeInputProvider` should encapsulate fork's + // behavior by taking fork's fee input into account during initialization. 
BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { - l1_gas_price: fork.l1_gas_price, - fair_l2_gas_price: fork.l2_fair_gas_price, - fair_pubdata_price: fork.fair_pubdata_price, + l1_gas_price: fork_details.l1_gas_price, + fair_l2_gas_price: fork_details.l2_fair_gas_price, + fair_pubdata_price: fork_details.fair_pubdata_price, }) } else { self.fee_input_provider.get_batch_fee_input() @@ -1250,41 +1250,28 @@ impl InMemoryNodeInner { .and_then(|state| state.get(&storage_key)) .cloned() .unwrap_or_default(); - - if value.is_zero() { - match self.fork_storage.read_value_internal(&storage_key) { - Ok(value) => Ok(H256(value.0)), - Err(error) => Err(Web3Error::InternalError(anyhow::anyhow!( - "failed to read storage: {}", - error - ))), - } - } else { - Ok(value) + if !value.is_zero() { + return Ok(value); + } + // TODO: Check if the rest of the logic below makes sense. + // AFAIU this branch can only be entered if the block was produced locally, but + // we query the fork regardless? + match self.fork_storage.read_value_internal(&storage_key) { + Ok(value) => Ok(H256(value.0)), + Err(error) => Err(Web3Error::InternalError(anyhow::anyhow!( + "failed to read storage: {}", + error + ))), } } else { - self.fork_storage - .inner - .read() - .expect("failed reading fork storage") - .fork - .as_ref() - .and_then(|fork| fork.fork_source.get_storage_at(address, idx, block).ok()) - .ok_or_else(|| { - tracing::error!( - "unable to get storage at address {:?}, index {:?} for block {:?}", - address, - idx, - block - ); - Web3Error::InternalError(anyhow::Error::msg("Failed to get storage.")) - }) + Ok(self.fork.get_storage_at(address, idx, block).await?) 
} } - pub async fn reset(&mut self, fork: Option) { + pub async fn reset(&mut self, fork_client_opt: Option) { + let fork_details = fork_client_opt.as_ref().map(|client| &client.details); let blockchain = Blockchain::new( - fork.as_ref(), + fork_details, self.config.genesis.as_ref(), self.config.genesis_timestamp, ); @@ -1295,15 +1282,16 @@ impl InMemoryNodeInner { )); self.time.set_current_timestamp_unchecked( - fork.as_ref() - .map(|f| f.block_timestamp) + fork_details + .map(|fd| fd.block_timestamp) .unwrap_or(NON_FORK_FIRST_BLOCK_TIMESTAMP), ); drop(std::mem::take(&mut *self.filters.write().await)); + self.fork.reset_fork_client(fork_client_opt); let fork_storage = ForkStorage::new( - fork, + self.fork.clone(), &self.config.system_contracts_options, self.config.use_evm_emulator, self.config.chain_id, @@ -1313,7 +1301,6 @@ impl InMemoryNodeInner { old_storage.raw_storage = std::mem::take(&mut new_storage.raw_storage); old_storage.value_read_cache = std::mem::take(&mut new_storage.value_read_cache); old_storage.factory_dep_cache = std::mem::take(&mut new_storage.factory_dep_cache); - old_storage.fork = std::mem::take(&mut new_storage.fork); self.fork_storage.chain_id = fork_storage.chain_id; drop(old_storage); drop(new_storage); @@ -1405,7 +1392,7 @@ impl InMemoryNodeInner { } else { StorageKeyLayout::ZkEra }; - let (inner, _, _, _) = InMemoryNodeInner::init( + let (inner, _, _, _, _) = InMemoryNodeInner::init( None, fee_provider, Arc::new(RwLock::new(Default::default())), @@ -1566,15 +1553,16 @@ impl InMemoryNodeInner { mod tests { use super::*; use crate::node::create_genesis; - use crate::node::fork::ForkStorage; + use crate::node::fork::ForkDetails; + use crate::node::inner::fork_storage::ForkStorage; use crate::testing; - use crate::testing::{ExternalStorage, TransactionBuilder, STORAGE_CONTRACT_BYTECODE}; + use crate::testing::{TransactionBuilder, STORAGE_CONTRACT_BYTECODE}; use anvil_zksync_config::constants::{ DEFAULT_ACCOUNT_BALANCE, 
DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, DEFAULT_FAIR_PUBDATA_PRICE, DEFAULT_L2_GAS_PRICE, TEST_NODE_NETWORK_ID, }; - use anvil_zksync_config::types::{CacheConfig, SystemContractsOptions}; + use anvil_zksync_config::types::SystemContractsOptions; use anvil_zksync_config::TestNodeConfig; use ethabi::{ParamType, Token, Uint}; use itertools::Itertools; @@ -1690,7 +1678,7 @@ mod tests { } #[tokio::test] - async fn test_run_l2_tx_raw_does_not_panic_on_external_storage_call() { + async fn test_run_l2_tx_raw_does_not_panic_on_mock_fork_client_call() { // Perform a transaction to get storage to an intermediate state let inner = InMemoryNodeInner::test(); let mut node = inner.write().await; @@ -1701,31 +1689,29 @@ mod tests { .system_contracts .system_contracts_for_initiator(&node.impersonation, &tx.initiator_account()); node.seal_block(vec![tx], system_contracts).await.unwrap(); - let external_storage = node.fork_storage.clone(); - // Execute next transaction using a fresh in-memory node and the external fork storage - let mock_db = ExternalStorage { - raw_storage: external_storage.inner.read().unwrap().raw_storage.clone(), + // Execute next transaction using a fresh in-memory node and mocked fork client + let fork_details = ForkDetails { + chain_id: TEST_NODE_NETWORK_ID.into(), + batch_number: L1BatchNumber(1), + block_number: L2BlockNumber(2), + block_hash: Default::default(), + block_timestamp: 1002, + api_block: api::Block::default(), + l1_gas_price: 1000, + l2_fair_gas_price: DEFAULT_L2_GAS_PRICE, + fair_pubdata_price: DEFAULT_FAIR_PUBDATA_PRICE, + estimate_gas_price_scale_factor: DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, + estimate_gas_scale_factor: DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, + ..Default::default() }; + let mock_fork_client = ForkClient::mock( + fork_details, + node.fork_storage.inner.read().unwrap().raw_storage.clone(), + ); let impersonation = ImpersonationManager::default(); - let (node, _, _, _) = InMemoryNodeInner::init( 
- Some(ForkDetails { - fork_source: Box::new(mock_db), - chain_id: TEST_NODE_NETWORK_ID.into(), - l1_block: L1BatchNumber(1), - l2_block: api::Block::default(), - l2_miniblock: 2, - l2_miniblock_hash: Default::default(), - block_timestamp: 1002, - overwrite_chain_id: None, - l1_gas_price: 1000, - l2_fair_gas_price: DEFAULT_L2_GAS_PRICE, - fair_pubdata_price: DEFAULT_FAIR_PUBDATA_PRICE, - fee_params: None, - estimate_gas_price_scale_factor: DEFAULT_ESTIMATE_GAS_PRICE_SCALE_FACTOR, - estimate_gas_scale_factor: DEFAULT_ESTIMATE_GAS_SCALE_FACTOR, - cache_config: CacheConfig::default(), - }), + let (node, _, _, _, _) = InMemoryNodeInner::init( + Some(mock_fork_client), TestNodeFeeInputProvider::default(), Arc::new(RwLock::new(Default::default())), TestNodeConfig::default(), @@ -1742,7 +1728,7 @@ mod tests { .system_contracts_for_initiator(&node.impersonation, &tx.initiator_account()); let (_, _, mut vm) = test_vm(&mut node, system_contracts).await; node.run_l2_tx_raw(tx, &mut vm) - .expect("transaction must pass with external storage"); + .expect("transaction must pass with mock fork client"); } #[tokio::test] diff --git a/crates/core/src/node/inner/mod.rs b/crates/core/src/node/inner/mod.rs index 3b80f0c3..3a7baa5e 100644 --- a/crates/core/src/node/inner/mod.rs +++ b/crates/core/src/node/inner/mod.rs @@ -11,11 +11,13 @@ //! 
are available outside of this module) pub mod blockchain; pub mod fork; +mod fork_storage; mod in_memory_inner; pub mod node_executor; pub mod storage; pub mod time; +pub use fork_storage::{SerializableForkStorage, SerializableStorage}; pub use in_memory_inner::{InMemoryNodeInner, TxExecutionOutput}; use crate::filters::EthFilters; @@ -27,7 +29,8 @@ use crate::system_contracts::SystemContracts; use anvil_zksync_config::constants::NON_FORK_FIRST_BLOCK_TIMESTAMP; use anvil_zksync_config::TestNodeConfig; use blockchain::ReadBlockchain; -use fork::{ForkDetails, ForkStorage}; +use fork::{Fork, ForkClient, ForkSource}; +use fork_storage::ForkStorage; use std::sync::Arc; use time::{ReadTime, Time}; use tokio::sync::RwLock; @@ -36,7 +39,7 @@ impl InMemoryNodeInner { // TODO: Bake in Arc> into the struct itself #[allow(clippy::type_complexity)] pub fn init( - fork: Option, + fork_client_opt: Option, fee_input_provider: TestNodeFeeInputProvider, filters: Arc>, config: TestNodeConfig, @@ -48,20 +51,26 @@ impl InMemoryNodeInner { Box, Box, Box, + Box, ) { + // TODO: We wouldn't have to clone cache config here if there was a proper per-component + // config separation. 
+ let fork = Fork::new(fork_client_opt, config.cache_config.clone()); + let fork_details = fork.details(); let time = Time::new( - fork.as_ref() - .map(|f| f.block_timestamp) + fork_details + .as_ref() + .map(|fd| fd.block_timestamp) .unwrap_or(NON_FORK_FIRST_BLOCK_TIMESTAMP), ); let blockchain = Blockchain::new( - fork.as_ref(), + fork_details.as_ref(), config.genesis.as_ref(), config.genesis_timestamp, ); // TODO: Create read-only/mutable versions of `ForkStorage` like `blockchain` and `time` above let fork_storage = ForkStorage::new( - fork, + fork.clone(), &config.system_contracts_options, config.use_evm_emulator, config.chain_id, @@ -71,6 +80,7 @@ impl InMemoryNodeInner { blockchain.clone(), time.clone(), fork_storage.clone(), + fork.clone(), fee_input_provider.clone(), filters, config.clone(), @@ -84,6 +94,7 @@ impl InMemoryNodeInner { Box::new(fork_storage), Box::new(blockchain), Box::new(time), + Box::new(fork), ) } } diff --git a/crates/core/src/node/inner/node_executor.rs b/crates/core/src/node/inner/node_executor.rs index f963c8ba..9f1fddf7 100644 --- a/crates/core/src/node/inner/node_executor.rs +++ b/crates/core/src/node/inner/node_executor.rs @@ -1,9 +1,12 @@ use super::InMemoryNodeInner; +use crate::node::fork::ForkConfig; +use crate::node::inner::fork::{ForkClient, ForkSource}; use crate::node::keys::StorageKeyLayout; use crate::node::pool::TxBatch; use crate::system_contracts::SystemContracts; use std::sync::Arc; use tokio::sync::{mpsc, oneshot, RwLock}; +use url::Url; use zksync_multivm::interface::TxExecutionMode; use zksync_types::bytecode::BytecodeHash; use zksync_types::utils::nonces_to_full_nonce; @@ -54,6 +57,18 @@ impl NodeExecutor { Command::SetNonce(address, nonce, reply) => { self.set_nonce(address, nonce, reply).await; } + Command::ResetFork(url, block_number, reply) => { + self.reset_fork(url, block_number, reply).await; + } + Command::ResetForkBlockNumber(block_number, reply) => { + self.reset_fork_block_number(block_number, 
reply).await; + } + Command::SetForkUrl(url, reply) => { + self.set_fork_url(url, reply).await; + } + Command::RemoveFork(reply) => { + self.remove_fork(reply).await; + } Command::IncreaseTime(delta, reply) => { self.increase_time(delta, reply).await; } @@ -216,6 +231,98 @@ impl NodeExecutor { } } + async fn reset_fork( + &self, + url: Url, + block_number: Option, + reply: oneshot::Sender>, + ) { + let result = async { + // We don't know what chain this is so we assume default scale configuration. + let fork_client = + ForkClient::at_block_number(ForkConfig::unknown(url), block_number).await?; + self.node_inner.write().await.reset(Some(fork_client)).await; + + anyhow::Ok(()) + } + .await; + + // Reply to sender if we can, otherwise hold result for further processing + let result = if let Err(result) = reply.send(result) { + tracing::info!("failed to reply as receiver has been dropped"); + result + } else { + return; + }; + // Not much we can do with an error at this level so we just print it + if let Err(err) = result { + tracing::error!("failed to reset fork: {:#?}", err); + } + } + + async fn reset_fork_block_number( + &self, + block_number: L2BlockNumber, + reply: oneshot::Sender>, + ) { + let result = async { + let node_inner = self.node_inner.write().await; + let url = node_inner + .fork + .url() + .ok_or_else(|| anyhow::anyhow!("no existing fork found"))?; + // Keep scale factors as this is the same chain. 
+ let details = node_inner + .fork + .details() + .ok_or_else(|| anyhow::anyhow!("no existing fork found"))?; + let fork_client = ForkClient::at_block_number( + ForkConfig { + url, + estimate_gas_price_scale_factor: details.estimate_gas_price_scale_factor, + estimate_gas_scale_factor: details.estimate_gas_scale_factor, + }, + Some(block_number), + ) + .await?; + // Release the write guard acquired above before re-locking, otherwise the + // second `write().await` below deadlocks (tokio RwLock is not reentrant). + drop(node_inner); + self.node_inner.write().await.reset(Some(fork_client)).await; + + anyhow::Ok(()) + } + .await; + + // Reply to sender if we can, otherwise hold result for further processing + let result = if let Err(result) = reply.send(result) { + tracing::info!("failed to reply as receiver has been dropped"); + result + } else { + return; + }; + // Not much we can do with an error at this level so we just print it + if let Err(err) = result { + tracing::error!("failed to reset fork: {:#?}", err); + } + } + + async fn set_fork_url(&self, url: Url, reply: oneshot::Sender>) { + let node_inner = self.node_inner.write().await; + let old_url = node_inner.fork.set_fork_url(url); + + // Reply to sender if we can + if reply.send(old_url).is_err() { + tracing::info!("failed to reply as receiver has been dropped"); + } + } + + async fn remove_fork(&self, reply: oneshot::Sender<()>) { + self.node_inner.write().await.reset(None).await; + + // Reply to sender if we can + if reply.send(()).is_err() { + tracing::info!("failed to reply as receiver has been dropped"); + } + } + async fn increase_time(&self, delta: u64, reply: oneshot::Sender<()>) { self.node_inner.write().await.time.increase_time(delta); // Reply to sender if we can @@ -407,6 +514,78 @@ impl NodeExecutor { } } + /// Request [`NodeExecutor`] to reset fork to given url and block number. All local state will + /// be wiped. Waits for the change to take place. 
+ pub async fn reset_fork_sync( + &self, + url: Url, + block_number: Option, + ) -> anyhow::Result<()> { + let (response_sender, response_receiver) = oneshot::channel(); + self.command_sender + .send(Command::ResetFork(url, block_number, response_sender)) + .await + .map_err(|_| anyhow::anyhow!("failed to reset fork as node executor is dropped"))?; + match response_receiver.await { + Ok(result) => result, + Err(_) => { + anyhow::bail!("failed to reset fork as node executor is dropped") + } + } + } + + /// Request [`NodeExecutor`] to reset fork at the given block number. All state will be wiped. + /// Fails if there is no existing fork. Waits for the change to take place. + pub async fn reset_fork_block_number_sync( + &self, + block_number: L2BlockNumber, + ) -> anyhow::Result<()> { + let (response_sender, response_receiver) = oneshot::channel(); + self.command_sender + .send(Command::ResetForkBlockNumber(block_number, response_sender)) + .await + .map_err(|_| { + anyhow::anyhow!("failed to reset fork block number as node executor is dropped") + })?; + match response_receiver.await { + Ok(result) => result, + Err(_) => { + anyhow::bail!("failed to reset fork block number as node executor is dropped") + } + } + } + + /// Request [`NodeExecutor`] to set fork's RPC URL without resetting the state. Waits for the + /// change to take place. Returns `Some(previous_url)` if fork existed and `None` otherwise. + pub async fn set_fork_url_sync(&self, url: Url) -> anyhow::Result> { + let (response_sender, response_receiver) = oneshot::channel(); + self.command_sender + .send(Command::SetForkUrl(url, response_sender)) + .await + .map_err(|_| anyhow::anyhow!("failed to set fork URL as node executor is dropped"))?; + match response_receiver.await { + Ok(result) => Ok(result), + Err(_) => { + anyhow::bail!("failed to set fork URL as node executor is dropped") + } + } + } + + /// Request [`NodeExecutor`] to remove fork if there is one. Waits for the change to take place. 
+ pub async fn remove_fork_sync(&self) -> anyhow::Result<()> { + let (response_sender, response_receiver) = oneshot::channel(); + self.command_sender + .send(Command::RemoveFork(response_sender)) + .await + .map_err(|_| anyhow::anyhow!("failed to remove fork as node executor is dropped"))?; + match response_receiver.await { + Ok(()) => Ok(()), + Err(_) => { + anyhow::bail!("failed to remove fork as node executor is dropped") + } + } + } + /// Request [`NodeExecutor`] to increase time by the given delta (in seconds). Waits for the /// change to take place. pub async fn increase_time_sync(&self, delta: u64) -> anyhow::Result<()> { @@ -502,6 +681,15 @@ enum Command { SetStorage(StorageKey, U256, oneshot::Sender<()>), SetBalance(Address, U256, oneshot::Sender<()>), SetNonce(Address, U256, oneshot::Sender<()>), + // Fork manipulation commands + ResetFork( + Url, + Option, + oneshot::Sender>, + ), + ResetForkBlockNumber(L2BlockNumber, oneshot::Sender>), + SetForkUrl(Url, oneshot::Sender>), + RemoveFork(oneshot::Sender<()>), // Time manipulation commands. Caveat: reply-able commands can hold user connections alive for // a long time (until the command is processed). 
IncreaseTime(u64, oneshot::Sender<()>), diff --git a/crates/core/src/node/state.rs b/crates/core/src/node/state.rs index 9cf6f253..6c36a052 100644 --- a/crates/core/src/node/state.rs +++ b/crates/core/src/node/state.rs @@ -1,4 +1,4 @@ -use super::inner::fork::{SerializableForkStorage, SerializableStorage}; +use super::inner::{SerializableForkStorage, SerializableStorage}; use super::TransactionResult; use serde::{Deserialize, Serialize}; use zksync_types::api::{Block, TransactionVariant}; diff --git a/crates/core/src/node/zks.rs b/crates/core/src/node/zks.rs index 5c776c14..00704f6e 100644 --- a/crates/core/src/node/zks.rs +++ b/crates/core/src/node/zks.rs @@ -1,5 +1,4 @@ use crate::node::InMemoryNode; -use crate::utils::internal_error; use anyhow::Context; use std::collections::HashMap; use zksync_types::api; @@ -25,7 +24,7 @@ impl InMemoryNode { .blockchain .get_block_tx_hashes_by_number(block_number) .await; - let transactions = if let Some(tx_hashes) = tx_hashes { + if let Some(tx_hashes) = tx_hashes { let mut transactions = Vec::with_capacity(tx_hashes.len()); for tx_hash in tx_hashes { let transaction = self @@ -35,46 +34,18 @@ impl InMemoryNode { .with_context(|| anyhow::anyhow!("Unexpectedly transaction (hash={tx_hash}) belongs to a block but could not be found"))?; transactions.push(transaction); } - transactions + Ok(transactions) } else { - let reader = self.inner.read().await; - let fork_storage_read = reader - .fork_storage - .inner - .read() - .expect("failed reading fork storage"); - - match fork_storage_read.fork.as_ref() { - Some(fork) => fork - .fork_source - .get_raw_block_transactions(block_number) - .map_err(|e| internal_error("get_raw_block_transactions", e))?, - None => return Err(Web3Error::NoBlock), - } - }; - - Ok(transactions) + Ok(self.fork.get_raw_block_transactions(block_number).await?) 
+ } } pub async fn get_bridge_contracts_impl(&self) -> Result { - let reader = self.inner.read().await; - - let result = match reader - .fork_storage - .inner - .read() - .expect("failed reading fork storage") + Ok(self .fork - .as_ref() - { - Some(fork) => fork.fork_source.get_bridge_contracts().map_err(|err| { - tracing::error!("failed fetching bridge contracts from the fork: {:?}", err); - Web3Error::InternalError(anyhow::Error::msg(format!( - "failed fetching bridge contracts from the fork: {:?}", - err - ))) - })?, - None => api::BridgeAddresses { + .get_bridge_contracts() + .await? + .unwrap_or(api::BridgeAddresses { l1_shared_default_bridge: Default::default(), l2_shared_default_bridge: Default::default(), l1_erc20_default_bridge: Default::default(), @@ -82,10 +53,7 @@ impl InMemoryNode { l1_weth_bridge: Default::default(), l2_weth_bridge: Default::default(), l2_legacy_shared_bridge: Default::default(), - }, - }; - - Ok(result) + })) } pub async fn get_confirmed_tokens_impl( @@ -93,29 +61,17 @@ impl InMemoryNode { from: u32, limit: u8, ) -> anyhow::Result> { - let reader = self.inner.read().await; - - let fork_storage_read = reader - .fork_storage - .inner - .read() - .expect("failed reading fork storage"); - - match fork_storage_read.fork.as_ref() { - Some(fork) => Ok(fork - .fork_source - .get_confirmed_tokens(from, limit) - .map_err(|e| { - anyhow::anyhow!("failed fetching bridge contracts from the fork: {:?}", e) - })?), - None => Ok(vec![zksync_web3_decl::types::Token { + Ok(self + .fork + .get_confirmed_tokens(from, limit) + .await? 
+ .unwrap_or(vec![zksync_web3_decl::types::Token { l1_address: Address::zero(), l2_address: L2_BASE_TOKEN_ADDRESS, name: "Ether".to_string(), symbol: "ETH".to_string(), decimals: 18, - }]), - } + }])) } pub async fn get_all_account_balances_impl( @@ -163,81 +119,29 @@ impl InMemoryNode { ) .await; - let maybe_block_details = match block_details { - Some(block_details) => Some(block_details), - None => self - .inner - .read() - .await - .fork_storage - .inner - .read() - .expect("failed reading fork storage") - .fork - .as_ref() - .and_then(|fork| { - fork.fork_source - .get_block_details(block_number) - .ok() - .flatten() - }), - }; - - Ok(maybe_block_details) + match block_details { + Some(block_details) => Ok(Some(block_details)), + None => self.fork.get_block_details(block_number).await, + } } pub async fn get_transaction_details_impl( &self, hash: H256, ) -> anyhow::Result> { - let tx_details = self.blockchain.get_tx_details(&hash).await; - let maybe_tx_details = match tx_details { - Some(tx_details) => Some(tx_details), - None => self - .inner - .read() - .await - .fork_storage - .inner - .read() - .expect("failed reading fork storage") - .fork - .as_ref() - .and_then(|fork| { - fork.fork_source - .get_transaction_details(hash) - .ok() - .flatten() - }), - }; - - Ok(maybe_tx_details) + match self.blockchain.get_tx_details(&hash).await { + Some(tx_details) => Ok(Some(tx_details)), + None => self.fork.get_transaction_details(hash).await, + } } pub async fn get_bytecode_by_hash_impl(&self, hash: H256) -> anyhow::Result>> { if let Some(bytecode) = self.storage.load_factory_dep_alt(hash).await? 
{ return Ok(Some(bytecode)); } + // Not found locally; fall back to querying the fork below. - let writer = self.inner.write().await; - let maybe_fork_details = &writer - .fork_storage - .inner - .read() - .expect("failed reading fork storage") - .fork; - if let Some(fork_details) = maybe_fork_details { - let maybe_bytecode = match fork_details.fork_source.get_bytecode_by_hash(hash) { - Ok(maybe_bytecode) => maybe_bytecode, - Err(error) => { - return Err(anyhow::anyhow!("failed to get bytecode: {:?}", error)); - } - }; - - Ok(maybe_bytecode) - } else { - Ok(None) - } + self.fork.get_bytecode_by_hash(hash).await } pub async fn get_base_token_l1_address_impl(&self) -> anyhow::Result
{ @@ -249,12 +153,11 @@ impl InMemoryNode { mod tests { use std::str::FromStr; - use anvil_zksync_config::types::CacheConfig; use zksync_types::{api, transaction_request::CallRequest, Address, H160, H256}; use zksync_types::{u256_to_h256, L1BatchNumber}; use super::*; - use crate::node::fork::ForkDetails; + use crate::node::fork::{ForkClient, ForkConfig}; use crate::node::TransactionResult; use crate::{ node::InMemoryNode, @@ -340,33 +243,23 @@ mod tests { }); let input_tx_hash = H256::repeat_byte(0x02); mock_server.expect( + "zks_getTransactionDetails", + Some(serde_json::json!([format!("{:#x}", input_tx_hash),])), serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getTransactionDetails", - "params": [ - format!("{:#x}", input_tx_hash), - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": { - "isL1Originated": false, - "status": "included", - "fee": "0x74293f087500", - "gasPerPubdata": "0x4e20", - "initiatorAddress": "0x63ab285cd87a189f345fed7dd4e33780393e01f0", - "receivedAt": "2023-10-12T15:45:53.094Z", - "ethCommitTxHash": null, - "ethProveTxHash": null, - "ethExecuteTxHash": null - }, - "id": 0 + "isL1Originated": false, + "status": "included", + "fee": "0x74293f087500", + "gasPerPubdata": "0x4e20", + "initiatorAddress": "0x63ab285cd87a189f345fed7dd4e33780393e01f0", + "receivedAt": "2023-10-12T15:45:53.094Z", + "ethCommitTxHash": null, + "ethProveTxHash": null, + "ethExecuteTxHash": null }), ); let node = InMemoryNode::test(Some( - ForkDetails::from_network(&mock_server.url(), None, &CacheConfig::None) + ForkClient::at_block_number(ForkConfig::unknown(mock_server.url()), None) .await .unwrap(), )); @@ -414,17 +307,9 @@ mod tests { }); let miniblock = L2BlockNumber::from(16474138); mock_server.expect( + "zks_getBlockDetails", + Some(serde_json::json!([miniblock.0])), serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getBlockDetails", - "params": [ - miniblock.0, - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", 
- "result": { "number": 16474138, "l1BatchNumber": 270435, "timestamp": 1697405098, @@ -447,13 +332,11 @@ mod tests { }, "operatorAddress": "0xa9232040bf0e0aea2578a5b2243f2916dbfc0a69", "protocolVersion": "Version15" - }, - "id": 0 }), ); let node = InMemoryNode::test(Some( - ForkDetails::from_network(&mock_server.url(), None, &CacheConfig::None) + ForkClient::at_block_number(ForkConfig::unknown(mock_server.url()), None) .await .unwrap(), )); @@ -511,27 +394,20 @@ mod tests { l2_legacy_shared_bridge: Some(H160::repeat_byte(0x6)), }; mock_server.expect( + "zks_getBridgeContracts", + None, serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getBridgeContracts", - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": { - "l1Erc20SharedBridge": format!("{:#x}", input_bridge_addresses.l1_shared_default_bridge.unwrap()), - "l2Erc20SharedBridge": format!("{:#x}", input_bridge_addresses.l2_shared_default_bridge.unwrap()), - "l1Erc20DefaultBridge": format!("{:#x}", input_bridge_addresses.l1_erc20_default_bridge.unwrap()), - "l2Erc20DefaultBridge": format!("{:#x}", input_bridge_addresses.l2_erc20_default_bridge.unwrap()), - "l1WethBridge": format!("{:#x}", input_bridge_addresses.l1_weth_bridge.unwrap()), - "l2WethBridge": format!("{:#x}", input_bridge_addresses.l2_weth_bridge.unwrap()) - }, - "id": 0 + "l1Erc20SharedBridge": format!("{:#x}", input_bridge_addresses.l1_shared_default_bridge.unwrap()), + "l2Erc20SharedBridge": format!("{:#x}", input_bridge_addresses.l2_shared_default_bridge.unwrap()), + "l1Erc20DefaultBridge": format!("{:#x}", input_bridge_addresses.l1_erc20_default_bridge.unwrap()), + "l2Erc20DefaultBridge": format!("{:#x}", input_bridge_addresses.l2_erc20_default_bridge.unwrap()), + "l1WethBridge": format!("{:#x}", input_bridge_addresses.l1_weth_bridge.unwrap()), + "l2WethBridge": format!("{:#x}", input_bridge_addresses.l2_weth_bridge.unwrap()) }), ); let node = InMemoryNode::test(Some( - ForkDetails::from_network(&mock_server.url(), None, 
&CacheConfig::None) + ForkClient::at_block_number(ForkConfig::unknown(mock_server.url()), None) .await .unwrap(), )); @@ -567,7 +443,8 @@ mod tests { assert_eq!(input_bytecode, actual); } - #[tokio::test] + // FIXME: Multi-threaded flavor is needed because of the `block_on` mess inside `ForkStorage`. + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_get_bytecode_by_hash_uses_fork_if_value_unavailable() { // Arrange let mock_server = MockServer::run_with_config(ForkBlockConfig { @@ -578,23 +455,13 @@ mod tests { let input_hash = H256::repeat_byte(0x1); let input_bytecode = vec![0x1]; mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getBytecodeByHash", - "params": [ - format!("{:#x}", input_hash) - ], - }), - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "result": input_bytecode, - }), + "zks_getBytecodeByHash", + Some(serde_json::json!([format!("{:#x}", input_hash)])), + serde_json::json!(input_bytecode), ); let node = InMemoryNode::test(Some( - ForkDetails::from_network(&mock_server.url(), None, &CacheConfig::None) + ForkClient::at_block_number(ForkConfig::unknown(mock_server.url()), None) .await .unwrap(), )); @@ -657,68 +524,60 @@ mod tests { }); let miniblock = L2BlockNumber::from(16474138); mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getRawBlockTransactions", - "params": [miniblock.0] - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": [ - { - "common_data": { - "L2": { - "nonce": 86, - "fee": { - "gas_limit": "0xcc626", - "max_fee_per_gas": "0x141dd760", - "max_priority_fee_per_gas": "0x0", - "gas_per_pubdata_limit": "0x4e20" - }, - "initiatorAddress": "0x840bd73f903ba7dbb501be8326fe521dadcae1a5", - "signature": [ - 135, - 163, - 2, - 78, - 118, - 14, - 209 - ], - "transactionType": "EIP1559Transaction", - "input": { - "hash": "0xc1f625f55d186ad0b439054adfe3317ae703c5f588f4fa1896215e8810a141e0", - "data": [ - 2, - 249, - 1, - 110, - 
130 - ] - }, - "paymasterParams": { - "paymaster": "0x0000000000000000000000000000000000000000", - "paymasterInput": [] - } - } + "zks_getRawBlockTransactions", + Some(serde_json::json!([miniblock.0])), + serde_json::json!([ + { + "common_data": { + "L2": { + "nonce": 86, + "fee": { + "gas_limit": "0xcc626", + "max_fee_per_gas": "0x141dd760", + "max_priority_fee_per_gas": "0x0", + "gas_per_pubdata_limit": "0x4e20" }, - "execute": { - "contractAddress": "0xbe7d1fd1f6748bbdefc4fbacafbb11c6fc506d1d", - "calldata": "0x38ed173900000000000000000000000000000000000000000000000000000000002c34cc00000000000000000000000000000000000000000000000000000000002c9a2500000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000840bd73f903ba7dbb501be8326fe521dadcae1a500000000000000000000000000000000000000000000000000000000652c5d1900000000000000000000000000000000000000000000000000000000000000020000000000000000000000008e86e46278518efc1c5ced245cba2c7e3ef115570000000000000000000000003355df6d4c9c3035724fd0e3914de96a5a83aaf4", - "value": "0x0", - "factoryDeps": null + "initiatorAddress": "0x840bd73f903ba7dbb501be8326fe521dadcae1a5", + "signature": [ + 135, + 163, + 2, + 78, + 118, + 14, + 209 + ], + "transactionType": "EIP1559Transaction", + "input": { + "hash": "0xc1f625f55d186ad0b439054adfe3317ae703c5f588f4fa1896215e8810a141e0", + "data": [ + 2, + 249, + 1, + 110, + 130 + ] }, - "received_timestamp_ms": 1697405097873u64, - "raw_bytes": 
"0x02f9016e820144568084141dd760830cc62694be7d1fd1f6748bbdefc4fbacafbb11c6fc506d1d80b9010438ed173900000000000000000000000000000000000000000000000000000000002c34cc00000000000000000000000000000000000000000000000000000000002c9a2500000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000840bd73f903ba7dbb501be8326fe521dadcae1a500000000000000000000000000000000000000000000000000000000652c5d1900000000000000000000000000000000000000000000000000000000000000020000000000000000000000008e86e46278518efc1c5ced245cba2c7e3ef115570000000000000000000000003355df6d4c9c3035724fd0e3914de96a5a83aaf4c080a087a3024e760ed14134ef541608bf308e083c899a89dba3c02bf3040f07c8b91b9fc3a7eeb6b3b8b36bb03ea4352415e7815dda4954f4898d255bd7660736285e" + "paymasterParams": { + "paymaster": "0x0000000000000000000000000000000000000000", + "paymasterInput": [] + } } - ], - "id": 0 - }), + }, + "execute": { + "contractAddress": "0xbe7d1fd1f6748bbdefc4fbacafbb11c6fc506d1d", + "calldata": "0x38ed173900000000000000000000000000000000000000000000000000000000002c34cc00000000000000000000000000000000000000000000000000000000002c9a2500000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000840bd73f903ba7dbb501be8326fe521dadcae1a500000000000000000000000000000000000000000000000000000000652c5d1900000000000000000000000000000000000000000000000000000000000000020000000000000000000000008e86e46278518efc1c5ced245cba2c7e3ef115570000000000000000000000003355df6d4c9c3035724fd0e3914de96a5a83aaf4", + "value": "0x0", + "factoryDeps": null + }, + "received_timestamp_ms": 1697405097873u64, + "raw_bytes": 
"0x02f9016e820144568084141dd760830cc62694be7d1fd1f6748bbdefc4fbacafbb11c6fc506d1d80b9010438ed173900000000000000000000000000000000000000000000000000000000002c34cc00000000000000000000000000000000000000000000000000000000002c9a2500000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000840bd73f903ba7dbb501be8326fe521dadcae1a500000000000000000000000000000000000000000000000000000000652c5d1900000000000000000000000000000000000000000000000000000000000000020000000000000000000000008e86e46278518efc1c5ced245cba2c7e3ef115570000000000000000000000003355df6d4c9c3035724fd0e3914de96a5a83aaf4c080a087a3024e760ed14134ef541608bf308e083c899a89dba3c02bf3040f07c8b91b9fc3a7eeb6b3b8b36bb03ea4352415e7815dda4954f4898d255bd7660736285e" + } + ]), ); let node = InMemoryNode::test(Some( - ForkDetails::from_network(&mock_server.url(), None, &CacheConfig::None) + ForkClient::at_block_number(ForkConfig::unknown(mock_server.url()), None) .await .unwrap(), )); @@ -756,141 +615,103 @@ mod tests { let cbeth_address = Address::from_str("0x75af292c1c9a37b3ea2e6041168b4e48875b9ed5") .expect("failed to parse address"); let mock_server = testing::MockServer::run(); - mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "eth_chainId", - }), - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "result": "0x104", - }), - ); + mock_server.expect("eth_chainId", None, serde_json::json!("0x104")); mock_server.expect( + "zks_getBlockDetails", + Some(serde_json::json!([1])), serde_json::json!({ - "jsonrpc": "2.0", - "id": 1, - "method": "zks_getBlockDetails", - "params": [1] - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": { - "baseSystemContractsHashes": { - "bootloader": "0x010008a5c30072f79f8e04f90b31f34e554279957e7e2bf85d3e9c7c1e0f834d", - "default_aa": "0x01000663d7941c097ba2631096508cf9ec7769ddd40e081fd81b0d04dc07ea0e" - }, - "commitTxHash": null, - "committedAt": null, - "executeTxHash": null, - "executedAt": null, - "l1BatchNumber": 0, - 
"l1GasPrice": 0, - "l1TxCount": 1, - "l2FairGasPrice": 50000000, - "l2TxCount": 0, - "number": 0, - "operatorAddress": "0x0000000000000000000000000000000000000000", - "protocolVersion": "Version16", - "proveTxHash": null, - "provenAt": null, - "rootHash": "0xdaa77426c30c02a43d9fba4e841a6556c524d47030762eb14dc4af897e605d9b", - "status": "verified", - "timestamp": 1000 + "baseSystemContractsHashes": { + "bootloader": "0x010008a5c30072f79f8e04f90b31f34e554279957e7e2bf85d3e9c7c1e0f834d", + "default_aa": "0x01000663d7941c097ba2631096508cf9ec7769ddd40e081fd81b0d04dc07ea0e" }, - "id": 1 + "commitTxHash": null, + "committedAt": null, + "executeTxHash": null, + "executedAt": null, + "l1BatchNumber": 0, + "l1GasPrice": 0, + "l1TxCount": 1, + "l2FairGasPrice": 50000000, + "l2TxCount": 0, + "number": 0, + "operatorAddress": "0x0000000000000000000000000000000000000000", + "protocolVersion": "Version16", + "proveTxHash": null, + "provenAt": null, + "rootHash": "0xdaa77426c30c02a43d9fba4e841a6556c524d47030762eb14dc4af897e605d9b", + "status": "verified", + "timestamp": 1000 }), ); mock_server.expect( + "eth_getBlockByHash", + Some(serde_json::json!(["0xdaa77426c30c02a43d9fba4e841a6556c524d47030762eb14dc4af897e605d9b", true])), serde_json::json!({ - "jsonrpc": "2.0", - "id": 2, - "method": "eth_getBlockByHash", - "params": ["0xdaa77426c30c02a43d9fba4e841a6556c524d47030762eb14dc4af897e605d9b", true] - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": { - "baseFeePerGas": "0x0", - "difficulty": "0x0", - "extraData": "0x", - "gasLimit": "0xffffffff", - "gasUsed": "0x0", - "hash": "0xdaa77426c30c02a43d9fba4e841a6556c524d47030762eb14dc4af897e605d9b", - "l1BatchNumber": "0x0", - "l1BatchTimestamp": null, - "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "sealFields": [], - "sha3Uncles": "0x0000000000000000000000000000000000000000000000000000000000000000", - "size": "0x0", - "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x3e8", - "totalDifficulty": "0x0", - "transactions": [], - "transactionsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "uncles": [] - }, - "id": 2 + "baseFeePerGas": "0x0", + "difficulty": "0x0", + "extraData": "0x", + "gasLimit": "0xffffffff", + "gasUsed": "0x0", + "hash": "0xdaa77426c30c02a43d9fba4e841a6556c524d47030762eb14dc4af897e605d9b", + "l1BatchNumber": "0x0", + "l1BatchTimestamp": null, + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x0000000000000000000000000000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "number": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "receiptsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "sealFields": [], + "sha3Uncles": "0x0000000000000000000000000000000000000000000000000000000000000000", + "size": "0x0", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x3e8", + "totalDifficulty": "0x0", + "transactions": [], + "transactionsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "uncles": [] }), ); mock_server.expect( - serde_json::json!({ - "jsonrpc": "2.0", - "id": 0, - "method": "zks_getConfirmedTokens", - "params": [0, 100] - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": [ - { - "decimals": 18, - "l1Address": "0xbe9895146f7af43049ca1c1ae358b0541ea49704", - "l2Address": "0x75af292c1c9a37b3ea2e6041168b4e48875b9ed5", - "name": "Coinbase Wrapped Staked ETH", - "symbol": "cbETH" - } - ], - "id": 0 - }), + "zks_getConfirmedTokens", + Some(serde_json::json!([0, 100])), + serde_json::json!([ + { + "decimals": 18, + "l1Address": "0xbe9895146f7af43049ca1c1ae358b0541ea49704", + "l2Address": "0x75af292c1c9a37b3ea2e6041168b4e48875b9ed5", + "name": "Coinbase Wrapped Staked ETH", + "symbol": "cbETH" + } 
+ ]), ); mock_server.expect( + "zks_getFeeParams", + None, serde_json::json!({ - "jsonrpc": "2.0", - "id": 3, - "method": "zks_getFeeParams", - }), - serde_json::json!({ - "jsonrpc": "2.0", - "result": { - "V2": { - "config": { - "minimal_l2_gas_price": 25000000, - "compute_overhead_part": 0, - "pubdata_overhead_part": 1, - "batch_overhead_l1_gas": 800000, - "max_gas_per_batch": 200000000, - "max_pubdata_per_batch": 240000 - }, - "l1_gas_price": 46226388803u64, - "l1_pubdata_price": 100780475095u64 + "V2": { + "config": { + "minimal_l2_gas_price": 25000000, + "compute_overhead_part": 0, + "pubdata_overhead_part": 1, + "batch_overhead_l1_gas": 800000, + "max_gas_per_batch": 200000000, + "max_pubdata_per_batch": 240000 + }, + "l1_gas_price": 46226388803u64, + "l1_pubdata_price": 100780475095u64, + "conversion_ratio": { + "numerator": 1, + "denominator": 1 } - }, - "id": 3 + } }), ); let node = InMemoryNode::test(Some( - ForkDetails::from_network(&mock_server.url(), Some(1), &CacheConfig::None) + ForkClient::at_block_number(ForkConfig::unknown(mock_server.url()), Some(1.into())) .await .unwrap(), )); diff --git a/crates/core/src/testing.rs b/crates/core/src/testing.rs index f4614679..19a37bf5 100644 --- a/crates/core/src/testing.rs +++ b/crates/core/src/testing.rs @@ -5,26 +5,25 @@ #![cfg(test)] -use crate::deps::InMemoryStorage; -use crate::node::fork::ForkSource; use crate::node::{InMemoryNode, TxExecutionInfo}; -use eyre::eyre; use httptest::{ matchers::{eq, json_decoded, request}, responders::json_encoded, Expectation, Server, }; use itertools::Itertools; +use serde::{Deserialize, Serialize}; use std::str::FromStr; -use zksync_types::api::{ - BlockDetailsBase, BlockIdVariant, BlockStatus, BridgeAddresses, DebugCall, DebugCallType, Log, +use std::sync::{Arc, RwLock}; +use url::Url; +use zksync_types::api::{BridgeAddresses, DebugCall, DebugCallType, Log}; +use zksync_types::fee::Fee; +use zksync_types::l2::L2Tx; +use zksync_types::{ + Address, K256PrivateKey, 
L2ChainId, Nonce, ProtocolVersionId, H160, H256, U256, U64, }; -use zksync_types::block::pack_block_info; -use zksync_types::u256_to_h256; -use zksync_types::{fee::Fee, l2::L2Tx, Address, L2ChainId, Nonce, ProtocolVersionId, H256, U256}; -use zksync_types::{AccountTreeId, L1BatchNumber, L2BlockNumber, H160, U64}; -use zksync_types::{K256PrivateKey, StorageKey}; +use zksync_web3_decl::jsonrpsee::types::TwoPointZero; /// Configuration for the [MockServer]'s initial block. #[derive(Default, Debug, Clone)] @@ -34,6 +33,14 @@ pub struct ForkBlockConfig { pub transaction_count: u8, } +#[derive(Serialize, Deserialize, Debug)] +struct RpcRequest { + pub jsonrpc: TwoPointZero, + pub id: u64, + pub method: String, + pub params: Option, +} + /// A HTTP server that can be used to mock a fork source. pub struct MockServer { /// The implementation for [httptest::Server]. @@ -58,25 +65,24 @@ impl MockServer { Expectation::matching(request::body(json_decoded(eq(serde_json::json!({ "jsonrpc": "2.0", "id": 0, - "method": "eth_chainId", + "method": "eth_blockNumber", }))))) .respond_with(json_encoded(serde_json::json!({ "jsonrpc": "2.0", "id": 0, - "result": "0x104", + "result": format!("{:#x}", block_config.number), }))), ); - server.expect( Expectation::matching(request::body(json_decoded(eq(serde_json::json!({ "jsonrpc": "2.0", "id": 1, - "method": "eth_blockNumber", + "method": "eth_chainId", }))))) .respond_with(json_encoded(serde_json::json!({ "jsonrpc": "2.0", "id": 1, - "result": format!("{:#x}", block_config.number), + "result": "0x104", }))), ); server.expect( @@ -178,7 +184,11 @@ impl MockServer { "max_pubdata_per_batch": 240000 }, "l1_gas_price": 46226388803u64, - "l1_pubdata_price": 100780475095u64 + "l1_pubdata_price": 100780475095u64, + "conversion_ratio": { + "numerator": 1, + "denominator": 1 + } } }, "id": 4 @@ -189,15 +199,37 @@ impl MockServer { } /// Retrieve the mock server's url. 
- pub fn url(&self) -> String { - self.inner.url("").to_string() + pub fn url(&self) -> Url { + self.inner.url("").to_string().parse().unwrap() } /// Assert an exactly single call expectation with a given request and the provided response. - pub fn expect(&self, request: serde_json::Value, response: serde_json::Value) { + pub fn expect( + &self, + method: &str, + params: Option, + result: serde_json::Value, + ) { + let method = method.to_string(); + let id_matcher = Arc::new(RwLock::new(0)); + let id_matcher_clone = id_matcher.clone(); self.inner.expect( - Expectation::matching(request::body(json_decoded(eq(request)))) - .respond_with(json_encoded(response)), + Expectation::matching(request::body(json_decoded(move |request: &RpcRequest| { + let result = request.method == method && request.params == params; + if result { + let mut writer = id_matcher.write().unwrap(); + *writer = request.id; + } + result + }))) + .respond_with(move || { + let id = *id_matcher_clone.read().unwrap(); + json_encoded(serde_json::json!({ + "jsonrpc": "2.0", + "result": result, + "id": id + })) + }), ); } } @@ -635,154 +667,7 @@ pub fn assert_bridge_addresses_eq( ); } -/// Represents a read-only fork source that is backed by the provided [InMemoryStorage]. 
-#[derive(Debug, Clone)] -pub struct ExternalStorage { - pub raw_storage: InMemoryStorage, -} - -impl ForkSource for ExternalStorage { - fn get_fork_url(&self) -> eyre::Result { - Err(eyre!("Not implemented")) - } - - fn get_storage_at( - &self, - address: H160, - idx: U256, - _block: Option, - ) -> eyre::Result { - let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(idx)); - Ok(self - .raw_storage - .state - .get(&key) - .cloned() - .unwrap_or_default()) - } - - fn get_raw_block_transactions( - &self, - _block_number: L2BlockNumber, - ) -> eyre::Result> { - todo!() - } - - fn get_bytecode_by_hash(&self, hash: H256) -> eyre::Result>> { - Ok(self.raw_storage.factory_deps.get(&hash).cloned()) - } - - fn get_transaction_by_hash( - &self, - _hash: H256, - ) -> eyre::Result> { - todo!() - } - - fn get_transaction_details( - &self, - _hash: H256, - ) -> eyre::Result> { - todo!() - } - - fn get_block_by_hash( - &self, - _hash: H256, - _full_transactions: bool, - ) -> eyre::Result>> { - todo!() - } - - fn get_block_by_number( - &self, - _block_number: zksync_types::api::BlockNumber, - _full_transactions: bool, - ) -> eyre::Result>> { - todo!() - } - - fn get_block_details( - &self, - miniblock: L2BlockNumber, - ) -> eyre::Result> { - Ok(Some(zksync_types::api::BlockDetails { - number: miniblock, - l1_batch_number: L1BatchNumber(123), - base: BlockDetailsBase { - timestamp: 0, - l1_tx_count: 0, - l2_tx_count: 0, - root_hash: None, - status: BlockStatus::Sealed, - commit_tx_hash: None, - committed_at: None, - commit_chain_id: None, - prove_tx_hash: None, - proven_at: None, - prove_chain_id: None, - execute_tx_hash: None, - executed_at: None, - execute_chain_id: None, - l1_gas_price: 123, - l2_fair_gas_price: 234, - fair_pubdata_price: Some(345), - base_system_contracts_hashes: Default::default(), - }, - operator_address: H160::zero(), - protocol_version: None, - })) - } - - fn get_fee_params(&self) -> eyre::Result { - todo!() - } - - fn 
get_block_transaction_count_by_hash(&self, _block_hash: H256) -> eyre::Result> { - todo!() - } - - fn get_block_transaction_count_by_number( - &self, - _block_number: zksync_types::api::BlockNumber, - ) -> eyre::Result> { - todo!() - } - - fn get_transaction_by_block_hash_and_index( - &self, - _block_hash: H256, - _index: zksync_types::web3::Index, - ) -> eyre::Result> { - todo!() - } - - fn get_transaction_by_block_number_and_index( - &self, - _block_number: zksync_types::api::BlockNumber, - _index: zksync_types::web3::Index, - ) -> eyre::Result> { - todo!() - } - - fn get_bridge_contracts(&self) -> eyre::Result { - todo!() - } - - fn get_confirmed_tokens( - &self, - _from: u32, - _limit: u8, - ) -> eyre::Result> { - todo!() - } -} - mod test { - use maplit::hashmap; - use zksync_types::block::unpack_block_info; - use zksync_types::h256_to_u256; - use super::*; #[test] @@ -928,72 +813,4 @@ mod test { log.topics ); } - - #[test] - fn test_external_storage() { - let input_batch = 1; - let input_l2_block = 2; - let input_timestamp = 3; - let input_bytecode = vec![0x4]; - let batch_key = StorageKey::new( - AccountTreeId::new(zksync_types::SYSTEM_CONTEXT_ADDRESS), - zksync_types::SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let l2_block_key = StorageKey::new( - AccountTreeId::new(zksync_types::SYSTEM_CONTEXT_ADDRESS), - zksync_types::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - - let storage = &ExternalStorage { - raw_storage: InMemoryStorage { - state: hashmap! { - batch_key => u256_to_h256(U256::from(input_batch)), - l2_block_key => u256_to_h256(pack_block_info( - input_l2_block, - input_timestamp, - )) - }, - factory_deps: hashmap! 
{ - H256::repeat_byte(0x1) => input_bytecode.clone(), - }, - }, - }; - - let actual_batch = storage - .get_storage_at( - zksync_types::SYSTEM_CONTEXT_ADDRESS, - h256_to_u256(zksync_types::SYSTEM_CONTEXT_BLOCK_INFO_POSITION), - None, - ) - .map(|value| h256_to_u256(value).as_u64()) - .expect("failed getting batch number"); - assert_eq!(input_batch, actual_batch); - - let (actual_l2_block, actual_timestamp) = storage - .get_storage_at( - zksync_types::SYSTEM_CONTEXT_ADDRESS, - h256_to_u256(zksync_types::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION), - None, - ) - .map(|value| unpack_block_info(h256_to_u256(value))) - .expect("failed getting l2 block info"); - assert_eq!(input_l2_block, actual_l2_block); - assert_eq!(input_timestamp, actual_timestamp); - - let zero_missing_value = storage - .get_storage_at( - zksync_types::SYSTEM_CONTEXT_ADDRESS, - h256_to_u256(H256::repeat_byte(0x1e)), - None, - ) - .map(|value| h256_to_u256(value).as_u64()) - .expect("failed missing value"); - assert_eq!(0, zero_missing_value); - - let actual_bytecode = storage - .get_bytecode_by_hash(H256::repeat_byte(0x1)) - .expect("failed getting bytecode") - .expect("missing bytecode"); - assert_eq!(input_bytecode, actual_bytecode); - } } diff --git a/e2e-tests-rust/Cargo.lock b/e2e-tests-rust/Cargo.lock index 20e2dc5f..ce4aa8e9 100644 --- a/e2e-tests-rust/Cargo.lock +++ b/e2e-tests-rust/Cargo.lock @@ -956,12 +956,11 @@ dependencies = [ "tempdir", "tokio", "tower 0.5.1", - "tower-http", ] [[package]] name = "anvil_zksync_config" -version = "0.2.4" +version = "0.2.5" dependencies = [ "alloy-signer 0.5.4", "alloy-signer-local 0.5.4", @@ -979,7 +978,7 @@ dependencies = [ [[package]] name = "anvil_zksync_core" -version = "0.2.4" +version = "0.2.5" dependencies = [ "anvil_zksync_config", "anvil_zksync_types", @@ -1005,6 +1004,7 @@ dependencies = [ "tokio", "tracing", "tracing-subscriber", + "url", "zksync_contracts", "zksync_multivm", "zksync_types", @@ -1013,7 +1013,7 @@ dependencies = [ 
[[package]] name = "anvil_zksync_types" -version = "0.2.4" +version = "0.2.5" dependencies = [ "clap", "serde", @@ -6286,20 +6286,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "tower-http" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" -dependencies = [ - "bitflags 2.6.0", - "bytes", - "http 1.1.0", - "pin-project-lite", - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.3"