diff --git a/.dockerignore b/.dockerignore index 51424900e8..2d5303a3be 100644 --- a/.dockerignore +++ b/.dockerignore @@ -36,6 +36,7 @@ arbitrator/tools/wasmer/target/ arbitrator/tools/wasm-tools/ arbitrator/tools/pricers/ arbitrator/tools/module_roots/ +arbitrator/tools/stylus_benchmark arbitrator/langs/rust/target/ arbitrator/langs/bf/target/ diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 47646017ac..dd58a30571 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -171,6 +171,9 @@ jobs: - name: Rustfmt - langs/rust run: cargo fmt --all --manifest-path arbitrator/langs/rust/Cargo.toml -- --check + - name: Rustfmt - tools/stylus_benchmark + run: cargo fmt --all --manifest-path arbitrator/tools/stylus_benchmark/Cargo.toml -- --check + - name: Make proofs from test cases run: make -j test-gen-proofs diff --git a/.github/workflows/submodule-pin-check.yml b/.github/workflows/submodule-pin-check.yml index 60dd8ad827..a6a7d9b66c 100644 --- a/.github/workflows/submodule-pin-check.yml +++ b/.github/workflows/submodule-pin-check.yml @@ -25,12 +25,12 @@ jobs: run: | status_state="pending" declare -Ar exceptions=( - [contracts]=origin/develop + [contracts]=origin/pre-bold [nitro-testnode]=origin/master - + #TODO Rachel to check these are the intended branches. [arbitrator/langs/c]=origin/vm-storage-cache - [arbitrator/tools/wasmer]=origin/adopt-v4.2.8 + [arbitrator/tools/wasmer]=origin/stylus ) divergent=0 for mod in `git submodule --quiet foreach 'echo $name'`; do @@ -38,7 +38,7 @@ jobs: if [[ -v exceptions[$mod] ]]; then branch=${exceptions[$mod]} fi - + if ! git -C $mod merge-base --is-ancestor HEAD $branch; then echo $mod diverges from $branch divergent=1 diff --git a/arbitrator/Cargo.toml b/arbitrator/Cargo.toml index eaafb6e439..3c5228daf2 100644 --- a/arbitrator/Cargo.toml +++ b/arbitrator/Cargo.toml @@ -12,6 +12,7 @@ members = [ exclude = [ "stylus/tests/", "tools/wasmer/", + "tools/stylus_benchmark", ] resolver = "2" diff --git a/arbitrator/arbutil/src/benchmark.rs b/arbitrator/arbutil/src/benchmark.rs new file mode 100644 index 0000000000..580d0191a0 --- /dev/null +++ b/arbitrator/arbutil/src/benchmark.rs @@ -0,0 +1,14 @@ +// Copyright 2024, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +use crate::evm::api::Ink; +use std::time::{Duration, Instant}; + +// Benchmark is used to track the performance of blocks of code in stylus +#[derive(Clone, Copy, Debug, Default)] +pub struct Benchmark { + pub timer: Option<Instant>, + pub elapsed_total: Duration, + pub ink_start: Option<Ink>, + pub ink_total: Ink, +} diff --git a/arbitrator/arbutil/src/lib.rs b/arbitrator/arbutil/src/lib.rs index 9c48a9fefc..e17e8d9448 100644 --- a/arbitrator/arbutil/src/lib.rs +++ b/arbitrator/arbutil/src/lib.rs @@ -1,6 +1,7 @@ // Copyright 2022-2024, Offchain Labs, Inc. // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE +pub mod benchmark; /// cbindgen:ignore pub mod color; pub mod crypto; diff --git a/arbitrator/jit/src/lib.rs b/arbitrator/jit/src/lib.rs new file mode 100644 index 0000000000..d0ad76bd03 --- /dev/null +++ b/arbitrator/jit/src/lib.rs @@ -0,0 +1,51 @@ +// Copyright 2021-2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +use std::path::PathBuf; +use structopt::StructOpt; + +mod arbcompress; +mod caller_env; +pub mod machine; +mod prepare; +pub mod program; +mod socket; +pub mod stylus_backend; +mod test; +mod wasip1_stub; +mod wavmio; + +#[derive(StructOpt)] +#[structopt(name = "jit-prover")] +pub struct Opts { + #[structopt(short, long)] + binary: PathBuf, + #[structopt(long, default_value = "0")] + inbox_position: u64, + #[structopt(long, default_value = "0")] + delayed_inbox_position: u64, + #[structopt(long, default_value = "0")] + position_within_message: u64, + #[structopt(long)] + last_block_hash: Option<String>, + #[structopt(long)] + last_send_root: Option<String>, + #[structopt(long)] + inbox: Vec<PathBuf>, + #[structopt(long)] + delayed_inbox: Vec<PathBuf>, + #[structopt(long)] + preimages: Option<PathBuf>, + #[structopt(long)] + cranelift: bool, + #[structopt(long)] + forks: bool, + #[structopt(long)] + pub debug: bool, + #[structopt(long)] + pub require_success: bool, + // JSON inputs supercede any of the command-line inputs which could + // be specified in the JSON file. + #[structopt(long)] + json_inputs: Option<PathBuf>, +} diff --git a/arbitrator/jit/src/main.rs b/arbitrator/jit/src/main.rs index 6e44500215..e19fabc250 100644 --- a/arbitrator/jit/src/main.rs +++ b/arbitrator/jit/src/main.rs @@ -1,58 +1,13 @@ // Copyright 2022-2024, Offchain Labs, Inc. // For license information, see https://github.com/nitro/blob/master/LICENSE -use crate::machine::{Escape, WasmEnv}; use arbutil::{color, Color}; use eyre::Result; -use std::path::PathBuf; +use jit::machine; +use jit::machine::{Escape, WasmEnv}; +use jit::Opts; use structopt::StructOpt; -mod arbcompress; -mod caller_env; -mod machine; -mod prepare; -mod program; -mod socket; -mod stylus_backend; -mod test; -mod wasip1_stub; -mod wavmio; - -#[derive(StructOpt)] -#[structopt(name = "jit-prover")] -pub struct Opts { - #[structopt(short, long)] - binary: PathBuf, - #[structopt(long, default_value = "0")] - inbox_position: u64, - #[structopt(long, default_value = "0")] - delayed_inbox_position: u64, - #[structopt(long, default_value = "0")] - position_within_message: u64, - #[structopt(long)] - last_block_hash: Option<String>, - #[structopt(long)] - last_send_root: Option<String>, - #[structopt(long)] - inbox: Vec<PathBuf>, - #[structopt(long)] - delayed_inbox: Vec<PathBuf>, - #[structopt(long)] - preimages: Option<PathBuf>, - #[structopt(long)] - cranelift: bool, - #[structopt(long)] - forks: bool, - #[structopt(long)] - debug: bool, - #[structopt(long)] - require_success: bool, - // JSON inputs supercede any of the command-line inputs which could - // be specified in the JSON file. - #[structopt(long)] - json_inputs: Option<PathBuf>, -} - fn main() -> Result<()> { let opts = Opts::from_args(); let env = match WasmEnv::cli(&opts) { diff --git a/arbitrator/jit/src/prepare.rs b/arbitrator/jit/src/prepare.rs index e7a7ba0f4d..62dd063b75 100644 --- a/arbitrator/jit/src/prepare.rs +++ b/arbitrator/jit/src/prepare.rs @@ -1,7 +1,7 @@ // Copyright 2022-2024, Offchain Labs, Inc.
// For license information, see https://github.com/nitro/blob/master/LICENSE -use crate::WasmEnv; +use crate::machine::WasmEnv; use arbutil::{Bytes32, PreimageType}; use eyre::Ok; use prover::parse_input::FileData; diff --git a/arbitrator/jit/src/program.rs b/arbitrator/jit/src/program.rs index f10a059748..d80b3771c6 100644 --- a/arbitrator/jit/src/program.rs +++ b/arbitrator/jit/src/program.rs @@ -4,8 +4,8 @@ #![allow(clippy::too_many_arguments)] use crate::caller_env::JitEnv; -use crate::machine::{Escape, MaybeEscape, WasmEnvMut}; -use crate::stylus_backend::exec_wasm; +use crate::machine::{Escape, MaybeEscape, WasmEnv, WasmEnvMut}; +use crate::stylus_backend::{exec_wasm, MessageFromCothread}; use arbutil::evm::api::Gas; use arbutil::Bytes32; use arbutil::{evm::EvmData, format::DebugBytes, heapify}; @@ -16,6 +16,7 @@ use prover::{ machine::Module, programs::{config::PricingParams, prelude::*}, }; +use std::sync::Arc; const DEFAULT_STYLUS_ARBOS_VERSION: u64 = 31; @@ -130,10 +131,6 @@ pub fn new_program( let evm_data: EvmData = unsafe { *Box::from_raw(evm_data_handler as *mut EvmData) }; let config: JitConfig = unsafe { *Box::from_raw(stylus_config_handler as *mut JitConfig) }; - // buy ink - let pricing = config.stylus.pricing; - let ink = pricing.gas_to_ink(Gas(gas)); - let Some(module) = exec.module_asms.get(&compiled_hash).cloned() else { return Err(Escape::Failure(format!( "module hash {:?} not found in {:?}", @@ -142,6 +139,21 @@ ))); }; + exec_program(exec, module, calldata, config, evm_data, gas) +} + +pub fn exec_program( + exec: &mut WasmEnv, + module: Arc<[u8]>, + calldata: Vec<u8>, + config: JitConfig, + evm_data: EvmData, + gas: u64, +) -> Result<u32, Escape> { + // buy ink + let pricing = config.stylus.pricing; + let ink = pricing.gas_to_ink(Gas(gas)); + let cothread = exec_wasm( module, calldata, @@ -162,7 +174,10 @@ pub fn new_program( /// returns request_id for the first request from the program pub fn start_program(mut env: WasmEnvMut, module: u32) -> Result<u32, Escape> { let (_, exec) = env.jit_env(); + start_program_with_wasm_env(exec, module) +} +pub fn start_program_with_wasm_env(exec: &mut WasmEnv, module: u32) -> Result<u32, Escape> { if exec.threads.len() as u32 != module || module == 0 { return Escape::hostio(format!( "got request for thread {module} but len is {}", @@ -179,13 +194,18 @@ pub fn start_program(mut env: WasmEnvMut, module: u32) -> Result<u32, Escape> { /// request_id MUST be last request id returned from start_program or send_response pub fn get_request(mut env: WasmEnvMut, id: u32, len_ptr: GuestPtr) -> Result<u32, Escape> { let (mut mem, exec) = env.jit_env(); + let msg = get_last_msg(exec, id)?; + mem.write_u32(len_ptr, msg.req_data.len() as u32); + Ok(msg.req_type) +} + +pub fn get_last_msg(exec: &mut WasmEnv, id: u32) -> Result<MessageFromCothread, Escape> { let thread = exec.threads.last_mut().unwrap(); let msg = thread.last_message()?; if msg.1 != id { return Escape::hostio("get_request id doesn't match"); }; - mem.write_u32(len_ptr, msg.0.req_data.len() as u32); - Ok(msg.0.req_type) + Ok(msg.0) } // gets data associated with last request.
@@ -193,12 +213,8 @@ pub fn get_request(mut env: WasmEnvMut, id: u32, len_ptr: GuestPtr) -> Result<u32, Escape> { pub fn get_request_data(mut env: WasmEnvMut, id: u32, data_ptr: GuestPtr) -> MaybeEscape { let (mut mem, exec) = env.jit_env(); - let thread = exec.threads.last_mut().unwrap(); - let msg = thread.last_message()?; - if msg.1 != id { - return Escape::hostio("get_request id doesn't match"); - }; - mem.write_slice(data_ptr, &msg.0.req_data); + let msg = get_last_msg(exec, id)?; + mem.write_slice(data_ptr, &msg.req_data); Ok(()) } @@ -217,11 +233,21 @@ pub fn set_response( let result = mem.read_slice(result_ptr, result_len as usize); let raw_data = mem.read_slice(raw_data_ptr, raw_data_len as usize); + set_response_with_wasm_env(exec, id, gas, result, raw_data) +} + +pub fn set_response_with_wasm_env( + exec: &mut WasmEnv, + id: u32, + gas: u64, + result: Vec<u8>, + raw_data: Vec<u8>, +) -> MaybeEscape { let thread = exec.threads.last_mut().unwrap(); thread.set_response(id, result, raw_data, Gas(gas)) } -/// sends previos response +/// sends previous response /// MUST be called right after set_response to the same id /// returns request_id for the next request pub fn send_response(mut env: WasmEnvMut, req_id: u32) -> Result<u32, Escape> { @@ -239,7 +265,10 @@ pub fn send_response(mut env: WasmEnvMut, req_id: u32) -> Result<u32, Escape> { /// removes the last created program pub fn pop(mut env: WasmEnvMut) -> MaybeEscape { let (_, exec) = env.jit_env(); + pop_with_wasm_env(exec) +} +pub fn pop_with_wasm_env(exec: &mut WasmEnv) -> MaybeEscape { match exec.threads.pop() { None => Err(Escape::Child(eyre!("no child"))), Some(mut thread) => thread.wait_done(), @@ -247,8 +276,8 @@ pub fn pop(mut env: WasmEnvMut) -> MaybeEscape { } pub struct JitConfig { - stylus: StylusConfig, - compile: CompileConfig, + pub stylus: StylusConfig, + pub compile: CompileConfig, } /// Creates a `StylusConfig` from its component parts.
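Taken together, the `program.rs` changes above split each hostio entry point into a thin wrapper plus a `_with_wasm_env` (or `exec_program`/`get_last_msg`) variant that takes a plain `&mut WasmEnv`, so a native host can drive a Stylus program without a WASM guest. A minimal sketch of the resulting call sequence, mirroring `run()` in the new `benchmark.rs` further down; the config values are the ones that file uses, the import paths are assumed from the crates seen elsewhere in this diff, and error handling is reduced to `unwrap`:

```rust
use arbutil::evm::EvmData;
use jit::machine::WasmEnv;
use jit::program::JitConfig;
use prover::programs::{config::{CompileConfig, PricingParams}, prelude::StylusConfig};
use std::sync::Arc;

// Sketch: drive one Stylus program to its first request, then reap the cothread.
// `compiled_module` is assumed to come from stylus::native::compile.
fn drive_once(compiled_module: Arc<[u8]>) {
    let exec = &mut WasmEnv::default();
    let config = JitConfig {
        stylus: StylusConfig {
            version: 2,
            max_depth: 10000,
            pricing: PricingParams { ink_price: 1 },
        },
        compile: CompileConfig::version(2, true),
    };

    // Spawn the cothread (this is where ink is bought from the gas budget).
    let thread = jit::program::exec_program(
        exec,
        compiled_module,
        vec![0u8; 32],      // calldata
        config,
        EvmData::default(),
        u64::MAX,           // gas
    )
    .unwrap();

    let req_id = jit::program::start_program_with_wasm_env(exec, thread).unwrap();
    let msg = jit::program::get_last_msg(exec, req_id).unwrap();
    let _ = jit::program::pop_with_wasm_env(exec); // tear down the finished thread
    println!("first request: type {}, {} data bytes", msg.req_type, msg.req_data.len());
}
```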
diff --git a/arbitrator/jit/src/stylus_backend.rs b/arbitrator/jit/src/stylus_backend.rs index 0d8c477c6c..d250780dd9 100644 --- a/arbitrator/jit/src/stylus_backend.rs +++ b/arbitrator/jit/src/stylus_backend.rs @@ -4,6 +4,7 @@ #![allow(clippy::too_many_arguments)] use crate::machine::{Escape, MaybeEscape}; +use arbutil::benchmark::Benchmark; use arbutil::evm::api::{Gas, Ink, VecReader}; use arbutil::evm::{ api::{EvmApiMethod, EVM_API_METHOD_REQ_OFFSET}, @@ -35,6 +36,7 @@ struct MessageToCothread { pub struct MessageFromCothread { pub req_type: u32, pub req_data: Vec<u8>, + pub benchmark: Benchmark, } struct CothreadRequestor { @@ -51,6 +53,7 @@ impl RequestHandler<VecReader> for CothreadRequestor { let msg = MessageFromCothread { req_type: req_type as u32 + EVM_API_METHOD_REQ_OFFSET, req_data: req_data.as_ref().to_vec(), + benchmark: Benchmark::default(), }; if let Err(error) = self.tx.send(msg) { @@ -169,6 +172,7 @@ pub fn exec_wasm( let msg = MessageFromCothread { req_data: output, req_type: out_kind as u32, + benchmark: instance.env().benchmark, }; instance .env_mut() diff --git a/arbitrator/stylus/src/env.rs b/arbitrator/stylus/src/env.rs index a153fb5bf1..a2c8189029 100644 --- a/arbitrator/stylus/src/env.rs +++ b/arbitrator/stylus/src/env.rs @@ -2,6 +2,7 @@ // For license information, see https://github.com/nitro/blob/master/LICENSE use arbutil::{ + benchmark::Benchmark, evm::{ api::{DataReader, EvmApi, Ink}, EvmData, @@ -48,6 +49,8 @@ pub struct WasmEnv<D: DataReader, E: EvmApi<D>> { pub compile: CompileConfig, /// The runtime config pub config: Option<StylusConfig>, + // Used to benchmark execution blocks of code + pub benchmark: Benchmark, // Using the unused generic parameter D in a PhantomData field _data_reader_marker: PhantomData<D>, } @@ -68,6 +71,7 @@ impl<D: DataReader, E: EvmApi<D>> WasmEnv<D, E> { outs: vec![], memory: None, meter: None, + benchmark: Benchmark::default(), _data_reader_marker: PhantomData, } } diff --git a/arbitrator/stylus/src/host.rs b/arbitrator/stylus/src/host.rs index c72cafc316..67497302a1 100644 --- a/arbitrator/stylus/src/host.rs +++ b/arbitrator/stylus/src/host.rs @@ -5,6 +5,7 @@ use crate::env::{Escape, HostioInfo, MaybeEscape, WasmEnv, WasmEnvMut}; use arbutil::{ + benchmark::Benchmark, evm::{ api::{DataReader, EvmApi, Gas, Ink}, EvmData, @@ -46,6 +47,10 @@ where &self.evm_data } + fn benchmark(&mut self) -> &mut Benchmark { + &mut self.env.benchmark + } + fn evm_return_data_len(&mut self) -> &mut u32 { &mut self.evm_data.return_data_len } @@ -464,3 +469,13 @@ pub(crate) fn console_tee<D: DataReader, E: EvmApi<D>, T: Into<Value> + Copy>( } pub(crate) fn null_host<D: DataReader, E: EvmApi<D>>(_: WasmEnvMut<D, E>) {} + +pub(crate) fn start_benchmark<D: DataReader, E: EvmApi<D>>( + mut env: WasmEnvMut<D, E>, +) -> MaybeEscape { + hostio!(env, start_benchmark()) +} + +pub(crate) fn end_benchmark<D: DataReader, E: EvmApi<D>>(mut env: WasmEnvMut<D, E>) -> MaybeEscape { + hostio!(env, end_benchmark()) +} diff --git a/arbitrator/stylus/src/native.rs b/arbitrator/stylus/src/native.rs index 0fbdb342f3..a31df1034c 100644 --- a/arbitrator/stylus/src/native.rs +++ b/arbitrator/stylus/src/native.rs @@ -212,6 +212,8 @@ impl<D: DataReader, E: EvmApi<D>> NativeInstance<D, E> { imports.define("console", "tee_f32", func!(host::console_tee::<D, E, f32>)); imports.define("console", "tee_f64", func!(host::console_tee::<D, E, f64>)); imports.define("debug", "null_host", func!(host::null_host)); + imports.define("debug", "start_benchmark", func!(host::start_benchmark)); + imports.define("debug", "end_benchmark", func!(host::end_benchmark)); } let instance = Instance::new(&mut store, &module, &imports)?; let exports = &instance.exports; @@ -429,6 +431,8 @@ pub fn module(wasm: &[u8], compile: CompileConfig, target: Target) -> Result<Vec<u8>> { [...] diff --git a/arbitrator/tools/stylus_benchmark/src/benchmark.rs b/arbitrator/tools/stylus_benchmark/src/benchmark.rs new file mode 100644 [...] +fn check_result(req_type: u32, req_data: &Vec<u8>) { + let _ = match
str::from_utf8(req_data) { + Ok(v) => v, + Err(e) => panic!("Invalid UTF-8 sequence: {}", e), + }; + + match req_type { + 0 => return, + 1 => panic!("ErrExecutionReverted user revert"), + 2 => panic!("ErrExecutionReverted user failure"), + 3 => panic!("ErrOutOfGas user out of ink"), + 4 => panic!("ErrDepth user out of stack"), + _ => panic!("ErrExecutionReverted user unknown"), + } +} + +fn run(compiled_module: Vec<u8>) -> (Duration, Ink) { + let calldata = Vec::from([0u8; 32]); + let evm_data = EvmData::default(); + let config = JitConfig { + stylus: StylusConfig { + version: 2, + max_depth: 10000, + pricing: PricingParams { ink_price: 1 }, + }, + compile: CompileConfig::version(2, true), + }; + + let exec = &mut WasmEnv::default(); + + let module = jit::program::exec_program( + exec, + compiled_module.into(), + calldata, + config, + evm_data, + u64::MAX, + ) + .unwrap(); + + let req_id = jit::program::start_program_with_wasm_env(exec, module).unwrap(); + let msg = jit::program::get_last_msg(exec, req_id).unwrap(); + if msg.req_type < EVM_API_METHOD_REQ_OFFSET { + let _ = jit::program::pop_with_wasm_env(exec); + + let req_data = msg.req_data[8..].to_vec(); + check_result(msg.req_type, &req_data); + } else { + panic!("unsupported request type {:?}", msg.req_type); + } + + (msg.benchmark.elapsed_total, msg.benchmark.ink_total) +} + +pub fn benchmark(wat: Vec<u8>) -> eyre::Result<()> { + let wasm = wasmer::wat2wasm(&wat)?; + + let compiled_module = native::compile(&wasm, 2, true, Target::default())?; + + let mut durations: Vec<Duration> = Vec::new(); + let mut ink_spent = Ink(0); + for i in 0..NUMBER_OF_BENCHMARK_RUNS { + print!("Run {:?}, ", i); + let (duration_run, ink_spent_run) = run(compiled_module.clone()); + durations.push(duration_run); + ink_spent = ink_spent_run; + println!( + "duration: {:?}, ink_spent: {:?}", + duration_run, ink_spent_run + ); + } + + // discard top and bottom runs + durations.sort(); + let l = NUMBER_OF_TOP_AND_BOTTOM_RUNS_TO_DISCARD as usize; + let r = NUMBER_OF_BENCHMARK_RUNS as usize - NUMBER_OF_TOP_AND_BOTTOM_RUNS_TO_DISCARD as usize; + durations = durations[l..r].to_vec(); + + let avg_duration = durations.iter().sum::<Duration>() / (r - l) as u32; + let avg_ink_spent_per_micro_second = ink_spent.0 / avg_duration.as_micros() as u64; + println!("After discarding top and bottom runs: "); + println!( + "avg_duration: {:?}, avg_ink_spent_per_micro_second: {:?}", + avg_duration, avg_ink_spent_per_micro_second + ); + + Ok(()) +} diff --git a/arbitrator/tools/stylus_benchmark/src/main.rs b/arbitrator/tools/stylus_benchmark/src/main.rs new file mode 100644 index 0000000000..4b8971ecab --- /dev/null +++ b/arbitrator/tools/stylus_benchmark/src/main.rs @@ -0,0 +1,44 @@ +// Copyright 2021-2024, Offchain Labs, Inc.
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +mod benchmark; +mod scenario; + +use clap::Parser; +use scenario::Scenario; +use std::path::PathBuf; +use strum::IntoEnumIterator; + +#[derive(Parser, Debug)] +#[command(version, about, long_about = None)] +struct Args { + #[arg(short, long)] + output_wat_dir_path: Option<PathBuf>, + + #[arg(short, long)] + scenario: Option<Scenario>, +} + +fn handle_scenario(scenario: Scenario, output_wat_dir_path: Option<PathBuf>) -> eyre::Result<()> { + println!("Benchmarking {}", scenario); + let wat = scenario::generate_wat(scenario, output_wat_dir_path); + benchmark::benchmark(wat) +} + +fn main() -> eyre::Result<()> { + let args = Args::parse(); + + match args.scenario { + Some(scenario) => handle_scenario(scenario, args.output_wat_dir_path), + None => { + println!("No scenario specified, benchmarking all scenarios\n"); + for scenario in Scenario::iter() { + let benchmark_result = handle_scenario(scenario, args.output_wat_dir_path.clone()); + if let Err(err) = benchmark_result { + return Err(err); + } + } + Ok(()) + } + } +} diff --git a/arbitrator/tools/stylus_benchmark/src/scenario.rs b/arbitrator/tools/stylus_benchmark/src/scenario.rs new file mode 100644 index 0000000000..348678ed69 --- /dev/null +++ b/arbitrator/tools/stylus_benchmark/src/scenario.rs @@ -0,0 +1,128 @@ +// Copyright 2021-2024, Offchain Labs, Inc. +// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE + +use std::fs::File; +use std::io::Write; +use std::path::PathBuf; +use strum_macros::{Display, EnumIter, EnumString}; + +#[derive(Copy, Clone, PartialEq, Eq, Debug, EnumString, Display, EnumIter)] +pub enum Scenario { + #[strum(serialize = "add_i32")] + AddI32, + #[strum(serialize = "xor_i32")] + XorI32, +} + +// Programs to be benchmarked have a loop in which several similar operations are executed. +// The number of operations per loop is chosen to be large enough so the overhead related to the loop is negligible, +// but not too large to avoid a big program size. +// Keeping a small program size is important to better use CPU cache, trying to keep the code in the cache.
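Concretely, the generator functions that follow assemble a module of roughly this shape for `Scenario::AddI32` (a sketch: the 2000 repeated ops per iteration are elided, and the constants come from `wat()` below, 200_000 iterations of 2000 ops each):

```rust
// Sketch of the WAT text that scenario.rs emits for Scenario::AddI32.
const ADD_I32_WAT_SKETCH: &str = r#"
(module
  (import "debug" "start_benchmark" (func $start_benchmark))
  (import "debug" "end_benchmark" (func $end_benchmark))
  (memory (export "memory") 0 0)
  (global $ops_counter (mut i32) (i32.const 0))
  (func (export "user_entrypoint") (param i32) (result i32)
    call $start_benchmark
    (loop $loop
      i32.const 0
      i32.const 1
      i32.add             ;; i32.const 1 / i32.add repeated 2000 times
      drop
      global.get $ops_counter
      i32.const 2000      ;; ops per loop iteration
      i32.add
      global.set $ops_counter
      global.get $ops_counter
      i32.const 400000000 ;; 200_000 iterations * 2000 ops
      i32.lt_s
      br_if $loop)
    call $end_benchmark
    i32.const 0)
)"#;

fn main() {
    println!("{}", ADD_I32_WAT_SKETCH);
}
```

Note that `start_benchmark`/`end_benchmark` sit outside the loop, so the measured block covers all iterations while the per-iteration counter bookkeeping stays inside it.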
+ fn write_wat_beginning(wat: &mut Vec<u8>) { + wat.write_all(b"(module\n").unwrap(); + wat.write_all(b" (import \"debug\" \"start_benchmark\" (func $start_benchmark))\n") + .unwrap(); + wat.write_all(b" (import \"debug\" \"end_benchmark\" (func $end_benchmark))\n") + .unwrap(); + wat.write_all(b" (memory (export \"memory\") 0 0)\n") + .unwrap(); + wat.write_all(b" (global $ops_counter (mut i32) (i32.const 0))\n") + .unwrap(); + wat.write_all(b" (func (export \"user_entrypoint\") (param i32) (result i32)\n") + .unwrap(); + + wat.write_all(b" call $start_benchmark\n").unwrap(); + + wat.write_all(b" (loop $loop\n").unwrap(); +} + +fn write_wat_end( + wat: &mut Vec<u8>, + number_of_loop_iterations: usize, + number_of_ops_per_loop_iteration: usize, +) { + let number_of_ops = number_of_loop_iterations * number_of_ops_per_loop_iteration; + + // update ops_counter + wat.write_all(b" global.get $ops_counter\n") + .unwrap(); + wat.write_all( + format!( + " i32.const {}\n", + number_of_ops_per_loop_iteration + ) + .as_bytes(), + ) + .unwrap(); + wat.write_all(b" i32.add\n").unwrap(); + wat.write_all(b" global.set $ops_counter\n") + .unwrap(); + + // check if we need to continue looping + wat.write_all(b" global.get $ops_counter\n") + .unwrap(); + wat.write_all(format!(" i32.const {}\n", number_of_ops).as_bytes()) + .unwrap(); + wat.write_all(b" i32.lt_s\n").unwrap(); + wat.write_all(b" br_if $loop)\n").unwrap(); + + wat.write_all(b" call $end_benchmark\n").unwrap(); + + wat.write_all(b" i32.const 0)\n").unwrap(); + wat.write_all(b")").unwrap(); +} + +fn wat(write_wat_ops: fn(&mut Vec<u8>, usize)) -> Vec<u8> { + let number_of_loop_iterations = 200_000; + let number_of_ops_per_loop_iteration = 2000; + + let mut wat = Vec::new(); + + write_wat_beginning(&mut wat); + + write_wat_ops(&mut wat, number_of_ops_per_loop_iteration); + + write_wat_end( + &mut wat, + number_of_loop_iterations, + number_of_ops_per_loop_iteration, + ); + + wat.to_vec() +} + +fn write_add_i32_wat_ops(wat: &mut Vec<u8>, number_of_ops_per_loop_iteration: usize) { + wat.write_all(b" i32.const 0\n").unwrap(); + for _ in 0..number_of_ops_per_loop_iteration { + wat.write_all(b" i32.const 1\n").unwrap(); + wat.write_all(b" i32.add\n").unwrap(); + } + wat.write_all(b" drop\n").unwrap(); +} + +fn write_xor_i32_wat_ops(wat: &mut Vec<u8>, number_of_ops_per_loop_iteration: usize) { + wat.write_all(b" i32.const 1231\n").unwrap(); + for _ in 0..number_of_ops_per_loop_iteration { + wat.write_all(b" i32.const 12312313\n").unwrap(); + wat.write_all(b" i32.xor\n").unwrap(); + } + wat.write_all(b" drop\n").unwrap(); +} + +pub fn generate_wat(scenario: Scenario, output_wat_dir_path: Option<PathBuf>) -> Vec<u8> { + let wat = match scenario { + Scenario::AddI32 => wat(write_add_i32_wat_ops), + Scenario::XorI32 => wat(write_xor_i32_wat_ops), + }; + + // print wat to file if needed + if let Some(output_wat_dir_path) = output_wat_dir_path { + let mut output_wat_path = output_wat_dir_path; + output_wat_path.push(format!("{}.wat", scenario)); + let mut file = File::create(output_wat_path).unwrap(); + file.write_all(&wat).unwrap(); + } + + wat +} diff --git a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs index 2f410849fc..25163e25bc 100644 --- a/arbitrator/wasm-libraries/user-host-trait/src/lib.rs +++ b/arbitrator/wasm-libraries/user-host-trait/src/lib.rs @@ -2,6 +2,7 @@ // For license information, see https://github.com/nitro/blob/master/LICENSE use arbutil::{ + benchmark::Benchmark, crypto, evm::{ self, @@ -21,6 +22,7 @@ use
prover::{ }; use ruint2::Uint; use std::fmt::Display; +use std::time::Instant; macro_rules! be { ($int:expr) => { @@ -68,6 +70,7 @@ pub trait UserHost<DR: DataReader>: GasMeteredMachine { fn evm_api(&mut self) -> &mut Self::A; fn evm_data(&self) -> &EvmData; + fn benchmark(&mut self) -> &mut Benchmark; fn evm_return_data_len(&mut self) -> &mut u32; fn read_slice(&self, ptr: GuestPtr, len: u32) -> Result<Vec<u8>, Self::MemoryErr>; @@ -962,4 +965,38 @@ pub trait UserHost<DR: DataReader>: GasMeteredMachine { self.say(value.into()); Ok(value) } + + // Initializes benchmark data related to a code block. + // A code block is defined by the instructions between start_benchmark and end_benchmark calls. + // If start_benchmark is called multiple times without end_benchmark being called, + // then only the last start_benchmark before end_benchmark will be used. + // It is possible to have multiple code blocks benchmarked in the same program. + fn start_benchmark(&mut self) -> Result<(), Self::Err> { + let ink_curr = self.ink_ready()?; + + let benchmark = self.benchmark(); + benchmark.timer = Some(Instant::now()); + benchmark.ink_start = Some(ink_curr); + + Ok(()) + } + + // Updates cumulative benchmark data related to a code block. + // If end_benchmark is called without a corresponding start_benchmark nothing will happen. + fn end_benchmark(&mut self) -> Result<(), Self::Err> { + let ink_curr = self.ink_ready()?; + + let benchmark = self.benchmark(); + if let Some(timer) = benchmark.timer { + benchmark.elapsed_total = benchmark.elapsed_total.saturating_add(timer.elapsed()); + + let code_block_ink = benchmark.ink_start.unwrap().saturating_sub(ink_curr); + benchmark.ink_total = benchmark.ink_total.saturating_add(code_block_ink); + + benchmark.timer = None; + benchmark.ink_start = None; + }; + + Ok(()) + } } diff --git a/arbitrator/wasm-libraries/user-host/src/program.rs b/arbitrator/wasm-libraries/user-host/src/program.rs index 7b3782b2e5..a2973ce56f 100644 --- a/arbitrator/wasm-libraries/user-host/src/program.rs +++ b/arbitrator/wasm-libraries/user-host/src/program.rs @@ -2,6 +2,7 @@ // For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE use arbutil::{ + benchmark::Benchmark, evm::{ api::{EvmApiMethod, Gas, Ink, VecReader, EVM_API_METHOD_REQ_OFFSET}, req::{EvmApiRequestor, RequestHandler}, @@ -75,6 +76,8 @@ pub(crate) struct Program { pub evm_api: EvmApiRequestor<VecReader, UserHostRequester>, /// EVM Context info. pub evm_data: EvmData, + // Used to benchmark execution blocks of code + pub benchmark: Benchmark, /// WAVM module index. pub module: u32, /// Call configuration.
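The doc comments on `start_benchmark`/`end_benchmark` in the `user-host-trait` hunk above pin down the semantics: a repeated start simply restarts the pending block, an unmatched end is a no-op, and every completed block accumulates into the running totals. A standalone sketch of that state machine (not the trait itself; the `Ink` newtype is flattened to a bare `u64` for brevity):

```rust
use std::time::{Duration, Instant};

// Standalone sketch of the Benchmark accumulation rules.
#[derive(Default)]
struct Bench {
    timer: Option<Instant>,
    elapsed_total: Duration,
    ink_start: Option<u64>,
    ink_total: u64,
}

impl Bench {
    fn start(&mut self, ink_now: u64) {
        // Calling start twice without an end restarts the pending block.
        self.timer = Some(Instant::now());
        self.ink_start = Some(ink_now);
    }
    fn end(&mut self, ink_now: u64) {
        // An end without a matching start does nothing.
        if let (Some(timer), Some(ink_start)) = (self.timer.take(), self.ink_start.take()) {
            self.elapsed_total = self.elapsed_total.saturating_add(timer.elapsed());
            self.ink_total = self.ink_total.saturating_add(ink_start.saturating_sub(ink_now));
        }
    }
}

fn main() {
    let mut b = Bench::default();
    b.start(1_000);
    b.start(900); // restart: only this block is measured
    b.end(250);   // accumulates 650 ink plus the elapsed time
    b.end(100);   // no pending block: no-op
    println!("ink_total = {}, elapsed = {:?}", b.ink_total, b.elapsed_total);
}
```

Draining `timer` and `ink_start` on completion is what makes a stray `end_benchmark` harmless.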
@@ -167,6 +170,7 @@ impl Program { outs: vec![], evm_api: EvmApiRequestor::new(UserHostRequester::default()), evm_data, + benchmark: Benchmark::default(), module, config, early_exit: None, @@ -237,6 +241,10 @@ impl UserHost<VecReader> for Program { &self.evm_data } + fn benchmark(&mut self) -> &mut Benchmark { + &mut self.benchmark + } + fn evm_return_data_len(&mut self) -> &mut u32 { &mut self.evm_data.return_data_len } diff --git a/arbitrator/wasm-libraries/user-test/src/program.rs b/arbitrator/wasm-libraries/user-test/src/program.rs index 299fca08c3..99252a38f0 100644 --- a/arbitrator/wasm-libraries/user-test/src/program.rs +++ b/arbitrator/wasm-libraries/user-test/src/program.rs @@ -3,6 +3,7 @@ use crate::{ARGS, EVER_PAGES, EVM_DATA, KEYS, LOGS, OPEN_PAGES, OUTS}; use arbutil::{ + benchmark::Benchmark, evm::{ api::{EvmApi, Gas, Ink, VecReader}, user::UserOutcomeKind, @@ -28,6 +29,7 @@ impl From<Escape> for eyre::ErrReport { /// Mock type representing a `user_host::Program` pub struct Program { evm_api: MockEvmApi, + benchmark: Benchmark, } #[allow(clippy::unit_arg)] @@ -52,6 +54,10 @@ impl UserHost<VecReader> for Program { &EVM_DATA } + fn benchmark(&mut self) -> &mut Benchmark { + &mut self.benchmark + } + fn evm_return_data_len(&mut self) -> &mut u32 { unimplemented!() } @@ -91,6 +97,7 @@ impl Program { pub fn current() -> Self { Self { evm_api: MockEvmApi, + benchmark: Benchmark::default(), } } diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 45bd70c92b..70c5952042 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -1157,7 +1157,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) if err != nil { return false, err } - if arbOSVersion >= 20 { + if arbOSVersion >= params.ArbosVersion_20 { if config.IgnoreBlobPrice { use4844 = true } else { diff --git a/arbnode/delayed_seq_reorg_test.go b/arbnode/delayed_seq_reorg_test.go index f821d71e63..8bddfc9e73 100644 --- a/arbnode/delayed_seq_reorg_test.go +++ b/arbnode/delayed_seq_reorg_test.go @@ -49,7 +49,22 @@ func TestSequencerReorgFromDelayed(t *testing.T) { }, }, } - err = tracker.AddDelayedMessages([]*DelayedInboxMessage{initMsgDelayed, userDelayed}, false) + delayedRequestId2 := common.BigToHash(common.Big2) + userDelayed2 := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: userDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &delayedRequestId2, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{initMsgDelayed, userDelayed, userDelayed2}) Require(t, err) serializedInitMsgBatch := make([]byte, 40) @@ -76,8 +91,8 @@ func TestSequencerReorgFromDelayed(t *testing.T) { SequenceNumber: 1, BeforeInboxAcc: [32]byte{1}, AfterInboxAcc: [32]byte{2}, - AfterDelayedAcc: userDelayed.AfterInboxAcc(), - AfterDelayedCount: 2, + AfterDelayedAcc: userDelayed2.AfterInboxAcc(), + AfterDelayedCount: 3, TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, @@ -90,8 +105,8 @@ func TestSequencerReorgFromDelayed(t *testing.T) { SequenceNumber: 2, BeforeInboxAcc: [32]byte{2}, AfterInboxAcc: [32]byte{3}, - AfterDelayedAcc: userDelayed.AfterInboxAcc(), - AfterDelayedCount: 2, + AfterDelayedAcc: userDelayed2.AfterInboxAcc(), + AfterDelayedCount: 3, TimeBounds: bridgegen.IBridgeTimeBounds{}, rawLog: types.Log{}, dataLocation: 0, @@ -101,28 +116,304 @@ func
TestSequencerReorgFromDelayed(t *testing.T) { err = tracker.AddSequencerBatches(ctx, nil, []*SequencerInboxBatch{initMsgBatch, userMsgBatch, emptyBatch}) Require(t, err) - // Reorg out the user delayed message - err = tracker.ReorgDelayedTo(1, true) + msgCount, err := streamer.GetMessageCount() Require(t, err) + if msgCount != 3 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 3)") + } - msgCount, err := streamer.GetMessageCount() + delayedCount, err := tracker.GetDelayedCount() + Require(t, err) + if delayedCount != 3 { + Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 3)") + } + + batchCount, err := tracker.GetBatchCount() + Require(t, err) + if batchCount != 3 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 3)") + } + + // By modifying the timestamp of the userDelayed message, and adding it again, we cause a reorg + userDelayedModified := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: initMsgDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: userDelayed.Message.Header.Timestamp + 1, + RequestId: &delayedRequestId, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{userDelayedModified}) + Require(t, err) + + // userMsgBatch, and emptyBatch will be reorged out + msgCount, err = streamer.GetMessageCount() Require(t, err) if msgCount != 1 { Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 1)") } + batchCount, err = tracker.GetBatchCount() + Require(t, err) + if batchCount != 1 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 1)") + } + + // userDelayed2 will be deleted + delayedCount, err = tracker.GetDelayedCount() + Require(t, err) + if delayedCount != 2 { + Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 2)") + } + + // guarantees that delayed msg 1 is userDelayedModified and not userDelayed + msg, err := tracker.GetDelayedMessage(ctx, 1) + Require(t, err) + if msg.Header.RequestId.Cmp(*userDelayedModified.Message.Header.RequestId) != 0 { + Fail(t, "Unexpected delayed message requestId", msg.Header.RequestId, "(expected", userDelayedModified.Message.Header.RequestId, ")") + } + if msg.Header.Timestamp != userDelayedModified.Message.Header.Timestamp { + Fail(t, "Unexpected delayed message timestamp", msg.Header.Timestamp, "(expected", userDelayedModified.Message.Header.Timestamp, ")") + } + if userDelayedModified.Message.Header.Timestamp == userDelayed.Message.Header.Timestamp { + Fail(t, "Unexpected delayed message timestamp", userDelayedModified.Message.Header.Timestamp, "(expected", userDelayed.Message.Header.Timestamp, ")") + } + + emptyBatch = &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 1, + BeforeInboxAcc: [32]byte{1}, + AfterInboxAcc: [32]byte{2}, + AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), + AfterDelayedCount: 1, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedInitMsgBatch, + } + err = tracker.AddSequencerBatches(ctx, nil, []*SequencerInboxBatch{emptyBatch}) + Require(t, err) + + msgCount, err = streamer.GetMessageCount() + Require(t, err) + if msgCount != 2 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 2)") + } + + batchCount, err = 
tracker.GetBatchCount() + Require(t, err) + if batchCount != 2 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 2)") + } +} + +func TestSequencerReorgFromLastDelayedMsg(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + exec, streamer, db, _ := NewTransactionStreamerForTest(t, common.Address{}) + tracker, err := NewInboxTracker(db, streamer, nil, DefaultSnapSyncConfig) + Require(t, err) + + err = streamer.Start(ctx) + Require(t, err) + exec.Start(ctx) + init, err := streamer.GetMessage(0) + Require(t, err) + + initMsgDelayed := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: [32]byte{}, + Message: init.Message, + } + delayedRequestId := common.BigToHash(common.Big1) + userDelayed := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: initMsgDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &delayedRequestId, + L1BaseFee: common.Big0, + }, + }, + } + delayedRequestId2 := common.BigToHash(common.Big2) + userDelayed2 := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: userDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &delayedRequestId2, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{initMsgDelayed, userDelayed, userDelayed2}) + Require(t, err) + + serializedInitMsgBatch := make([]byte, 40) + binary.BigEndian.PutUint64(serializedInitMsgBatch[32:], 1) + initMsgBatch := &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 0, + BeforeInboxAcc: [32]byte{}, + AfterInboxAcc: [32]byte{1}, + AfterDelayedAcc: initMsgDelayed.AfterInboxAcc(), + AfterDelayedCount: 1, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedInitMsgBatch, + } + serializedUserMsgBatch := make([]byte, 40) + binary.BigEndian.PutUint64(serializedUserMsgBatch[32:], 2) + userMsgBatch := &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 1, + BeforeInboxAcc: [32]byte{1}, + AfterInboxAcc: [32]byte{2}, + AfterDelayedAcc: userDelayed2.AfterInboxAcc(), + AfterDelayedCount: 3, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedUserMsgBatch, + } + emptyBatch := &SequencerInboxBatch{ + BlockHash: [32]byte{}, + ParentChainBlockNumber: 0, + SequenceNumber: 2, + BeforeInboxAcc: [32]byte{2}, + AfterInboxAcc: [32]byte{3}, + AfterDelayedAcc: userDelayed2.AfterInboxAcc(), + AfterDelayedCount: 3, + TimeBounds: bridgegen.IBridgeTimeBounds{}, + rawLog: types.Log{}, + dataLocation: 0, + bridgeAddress: [20]byte{}, + serialized: serializedUserMsgBatch, + } + err = tracker.AddSequencerBatches(ctx, nil, []*SequencerInboxBatch{initMsgBatch, userMsgBatch, emptyBatch}) + Require(t, err) + + msgCount, err := streamer.GetMessageCount() + Require(t, err) + if msgCount != 3 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 3)") + } + delayedCount, err := tracker.GetDelayedCount() Require(t, err) - if delayedCount != 1 { - Fail(t, "Unexpected tracker delayed message count", 
delayedCount, "(expected 1)") + if delayedCount != 3 { + Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 3)") } batchCount, err := tracker.GetBatchCount() Require(t, err) + if batchCount != 3 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 3)") + } + + // Adding an already existing message alongside a new one shouldn't cause a reorg + delayedRequestId3 := common.BigToHash(common.Big3) + userDelayed3 := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: userDelayed2.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: 0, + RequestId: &delayedRequestId3, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{userDelayed2, userDelayed3}) + Require(t, err) + + msgCount, err = streamer.GetMessageCount() + Require(t, err) + if msgCount != 3 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 3)") + } + + batchCount, err = tracker.GetBatchCount() + Require(t, err) + if batchCount != 3 { + Fail(t, "Unexpected tracker batch count", batchCount, "(expected 3)") + } + + // By modifying the timestamp of the userDelayed2 message, and adding it again, we cause a reorg + userDelayed2Modified := &DelayedInboxMessage{ + BlockHash: [32]byte{}, + BeforeInboxAcc: userDelayed.AfterInboxAcc(), + Message: &arbostypes.L1IncomingMessage{ + Header: &arbostypes.L1IncomingMessageHeader{ + Kind: arbostypes.L1MessageType_EndOfBlock, + Poster: [20]byte{}, + BlockNumber: 0, + Timestamp: userDelayed2.Message.Header.Timestamp + 1, + RequestId: &delayedRequestId2, + L1BaseFee: common.Big0, + }, + }, + } + err = tracker.AddDelayedMessages([]*DelayedInboxMessage{userDelayed2Modified}) + Require(t, err) + + msgCount, err = streamer.GetMessageCount() + Require(t, err) + if msgCount != 1 { + Fail(t, "Unexpected tx streamer message count", msgCount, "(expected 1)") + } + + batchCount, err = tracker.GetBatchCount() + Require(t, err) if batchCount != 1 { Fail(t, "Unexpected tracker batch count", batchCount, "(expected 1)") } + delayedCount, err = tracker.GetDelayedCount() + Require(t, err) + if delayedCount != 3 { + Fail(t, "Unexpected tracker delayed message count", delayedCount, "(expected 3)") + } + + // guarantees that delayed msg 2 is userDelayed2Modified and not userDelayed2 + msg, err := tracker.GetDelayedMessage(ctx, 2) + Require(t, err) + if msg.Header.RequestId.Cmp(*userDelayed2Modified.Message.Header.RequestId) != 0 { + Fail(t, "Unexpected delayed message requestId", msg.Header.RequestId, "(expected", userDelayed2Modified.Message.Header.RequestId, ")") + } + if msg.Header.Timestamp != userDelayed2Modified.Message.Header.Timestamp { + Fail(t, "Unexpected delayed message timestamp", msg.Header.Timestamp, "(expected", userDelayed2Modified.Message.Header.Timestamp, ")") + } + if userDelayed2Modified.Message.Header.Timestamp == userDelayed2.Message.Header.Timestamp { + Fail(t, "Unexpected delayed message timestamp", userDelayed2Modified.Message.Header.Timestamp, "(expected", userDelayed2.Message.Header.Timestamp, ")") + } + emptyBatch = &SequencerInboxBatch{ BlockHash: [32]byte{}, ParentChainBlockNumber: 0,
`koanf:"delay-blocks" reload:"hot"` CheckDelay time.Duration `koanf:"check-delay" reload:"hot"` - HardReorg bool `koanf:"hard-reorg" reload:"hot"` MinBlocksToRead uint64 `koanf:"min-blocks-to-read" reload:"hot"` DefaultBlocksToRead uint64 `koanf:"default-blocks-to-read" reload:"hot"` TargetMessagesRead uint64 `koanf:"target-messages-read" reload:"hot"` @@ -52,7 +51,6 @@ func (c *InboxReaderConfig) Validate() error { func InboxReaderConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".delay-blocks", DefaultInboxReaderConfig.DelayBlocks, "number of latest blocks to ignore to reduce reorgs") f.Duration(prefix+".check-delay", DefaultInboxReaderConfig.CheckDelay, "the maximum time to wait between inbox checks (if not enough new blocks are found)") - f.Bool(prefix+".hard-reorg", DefaultInboxReaderConfig.HardReorg, "erase future transactions in addition to overwriting existing ones on reorg") f.Uint64(prefix+".min-blocks-to-read", DefaultInboxReaderConfig.MinBlocksToRead, "the minimum number of blocks to read at once (when caught up lowers load on L1)") f.Uint64(prefix+".default-blocks-to-read", DefaultInboxReaderConfig.DefaultBlocksToRead, "the default number of blocks to read at once (will vary based on traffic by default)") f.Uint64(prefix+".target-messages-read", DefaultInboxReaderConfig.TargetMessagesRead, "if adjust-blocks-to-read is enabled, the target number of messages to read at once") @@ -63,7 +61,6 @@ func InboxReaderConfigAddOptions(prefix string, f *flag.FlagSet) { var DefaultInboxReaderConfig = InboxReaderConfig{ DelayBlocks: 0, CheckDelay: time.Minute, - HardReorg: false, MinBlocksToRead: 1, DefaultBlocksToRead: 100, TargetMessagesRead: 500, @@ -74,7 +71,6 @@ var DefaultInboxReaderConfig = InboxReaderConfig{ var TestInboxReaderConfig = InboxReaderConfig{ DelayBlocks: 0, CheckDelay: time.Millisecond * 10, - HardReorg: false, MinBlocksToRead: 1, DefaultBlocksToRead: 100, TargetMessagesRead: 500, @@ -360,7 +356,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { missingDelayed = true } else if ourLatestDelayedCount > checkingDelayedCount { log.Info("backwards reorg of delayed messages", "from", ourLatestDelayedCount, "to", checkingDelayedCount) - err = r.tracker.ReorgDelayedTo(checkingDelayedCount, config.HardReorg) + err = r.tracker.ReorgDelayedTo(checkingDelayedCount) if err != nil { return err } @@ -397,11 +393,6 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { log.Debug("Expecting to find sequencer batches", "checkingBatchCount", checkingBatchCount, "ourLatestBatchCount", ourLatestBatchCount, "currentHeight", currentHeight) checkingBatchCount = ourLatestBatchCount missingSequencer = true - } else if ourLatestBatchCount > checkingBatchCount && config.HardReorg { - err = r.tracker.ReorgBatchesTo(checkingBatchCount) - if err != nil { - return err - } } if checkingBatchCount > 0 { checkingBatchSeqNum := checkingBatchCount - 1 @@ -630,7 +621,7 @@ func (r *InboxReader) run(ctx context.Context, hadError bool) error { } func (r *InboxReader) addMessages(ctx context.Context, sequencerBatches []*SequencerInboxBatch, delayedMessages []*DelayedInboxMessage) (bool, error) { - err := r.tracker.AddDelayedMessages(delayedMessages, r.config().HardReorg) + err := r.tracker.AddDelayedMessages(delayedMessages) if err != nil { return false, err } diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index d5afa142d8..87e84b3737 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -404,7 +404,7 @@ func (t 
*InboxTracker) GetDelayedMessageBytes(ctx context.Context, seqNum uint64 return msg.Serialize() } -func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardReorg bool) error { +func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage) error { var nextAcc common.Hash firstDelayedMsgToKeep := uint64(0) if len(messages) == 0 { @@ -440,17 +440,15 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR t.mutex.Lock() defer t.mutex.Unlock() - if !hardReorg { - // This math is safe to do as we know len(messages) > 0 - haveLastAcc, err := t.GetDelayedAcc(pos + uint64(len(messages)) - 1) - if err == nil { - if haveLastAcc == messages[len(messages)-1].AfterInboxAcc() { - // We already have these delayed messages - return nil - } - } else if !errors.Is(err, AccumulatorNotFoundErr) { - return err + // This math is safe to do as we know len(messages) > 0 + haveLastAcc, err := t.GetDelayedAcc(pos + uint64(len(messages)) - 1) + if err == nil { + if haveLastAcc == messages[len(messages)-1].AfterInboxAcc() { + // We already have these delayed messages + return nil } + } else if !errors.Is(err, AccumulatorNotFoundErr) { + return err } if pos > firstDelayedMsgToKeep { @@ -464,6 +462,7 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR } } + firstPos := pos batch := t.db.NewBatch() for _, message := range messages { seqNum, err := message.Message.Header.SeqNum() @@ -480,6 +479,22 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR } nextAcc = message.AfterInboxAcc() + if firstPos == pos { + // Check if this message is a duplicate + haveAcc, err := t.GetDelayedAcc(seqNum) + if err == nil { + if haveAcc == nextAcc { + // Skip this message, as we already have it in our database + pos++ + firstPos++ + messages = messages[1:] + continue + } + } else if !errors.Is(err, AccumulatorNotFoundErr) { + return err + } + } + delayedMsgKey := dbKey(rlpDelayedMessagePrefix, seqNum) msgData, err := rlp.EncodeToBytes(message.Message) @@ -506,13 +521,16 @@ func (t *InboxTracker) AddDelayedMessages(messages []*DelayedInboxMessage, hardR pos++ } - return t.setDelayedCountReorgAndWriteBatch(batch, pos, true) + return t.setDelayedCountReorgAndWriteBatch(batch, firstPos, pos, true) } // All-in-one delayed message count adjuster. Can go forwards or backwards. // Requires the mutex is held. Sets the delayed count and performs any sequencer batch reorg necessary. // Also deletes any future delayed messages. 
-func (t *InboxTracker) setDelayedCountReorgAndWriteBatch(batch ethdb.Batch, newDelayedCount uint64, canReorgBatches bool) error { +func (t *InboxTracker) setDelayedCountReorgAndWriteBatch(batch ethdb.Batch, firstNewDelayedMessagePos uint64, newDelayedCount uint64, canReorgBatches bool) error { + if firstNewDelayedMessagePos > newDelayedCount { + return fmt.Errorf("firstNewDelayedMessagePos %v is after newDelayedCount %v", firstNewDelayedMessagePos, newDelayedCount) + } err := deleteStartingAt(t.db, batch, rlpDelayedMessagePrefix, uint64ToKey(newDelayedCount)) if err != nil { return err @@ -535,7 +553,7 @@ func (t *InboxTracker) setDelayedCountReorgAndWriteBatch(batch ethdb.Batch, newD return err } - seqBatchIter := t.db.NewIterator(delayedSequencedPrefix, uint64ToKey(newDelayedCount+1)) + seqBatchIter := t.db.NewIterator(delayedSequencedPrefix, uint64ToKey(firstNewDelayedMessagePos+1)) defer seqBatchIter.Release() var reorgSeqBatchesToCount *uint64 for seqBatchIter.Next() { @@ -856,7 +874,7 @@ func (t *InboxTracker) AddSequencerBatches(ctx context.Context, client *ethclien return nil } -func (t *InboxTracker) ReorgDelayedTo(count uint64, canReorgBatches bool) error { +func (t *InboxTracker) ReorgDelayedTo(count uint64) error { t.mutex.Lock() defer t.mutex.Unlock() @@ -871,7 +889,7 @@ func (t *InboxTracker) ReorgDelayedTo(count uint64, canReorgBatches bool) error return errors.New("attempted to reorg to future delayed count") } - return t.setDelayedCountReorgAndWriteBatch(t.db.NewBatch(), count, canReorgBatches) + return t.setDelayedCountReorgAndWriteBatch(t.db.NewBatch(), count, count, false) } func (t *InboxTracker) ReorgBatchesTo(count uint64) error { diff --git a/arbnode/message_pruner.go b/arbnode/message_pruner.go index 840a15f328..08f568796d 100644 --- a/arbnode/message_pruner.go +++ b/arbnode/message_pruner.go @@ -46,7 +46,7 @@ type MessagePrunerConfigFetcher func() *MessagePrunerConfig var DefaultMessagePrunerConfig = MessagePrunerConfig{ Enable: true, PruneInterval: time.Minute, - MinBatchesLeft: 2, + MinBatchesLeft: 1000, } func MessagePrunerConfigAddOptions(prefix string, f *flag.FlagSet) { diff --git a/arbnode/node.go b/arbnode/node.go index f2e3433ecd..d96c4001c4 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -285,19 +285,21 @@ type Node struct { } type SnapSyncConfig struct { - Enabled bool - PrevBatchMessageCount uint64 - PrevDelayedRead uint64 - BatchCount uint64 - DelayedCount uint64 + Enabled bool + PrevBatchMessageCount uint64 + PrevDelayedRead uint64 + BatchCount uint64 + DelayedCount uint64 + ParentChainAssertionBlock uint64 } var DefaultSnapSyncConfig = SnapSyncConfig{ - Enabled: false, - PrevBatchMessageCount: 0, - BatchCount: 0, - DelayedCount: 0, - PrevDelayedRead: 0, + Enabled: false, + PrevBatchMessageCount: 0, + PrevDelayedRead: 0, + BatchCount: 0, + DelayedCount: 0, + ParentChainAssertionBlock: 0, } type ConfigFetcher interface { @@ -596,7 +598,29 @@ func createNodeImpl( if err != nil { return nil, err } - inboxReader, err := NewInboxReader(inboxTracker, l1client, l1Reader, new(big.Int).SetUint64(deployInfo.DeployedAt), delayedBridge, sequencerInbox, func() *InboxReaderConfig { return &configFetcher.Get().InboxReader }) + firstMessageBlock := new(big.Int).SetUint64(deployInfo.DeployedAt) + if config.SnapSyncTest.Enabled { + batchCount := config.SnapSyncTest.BatchCount + delayedMessageNumber, err := exec.NextDelayedMessageNumber() + if err != nil { + return nil, err + } + if batchCount > delayedMessageNumber { + batchCount = delayedMessageNumber + } + // 
Find the first block containing the batch count. + // Subtract 1 to get the block before the needed batch count, + // this is done to fetch previous batch metadata needed for snap sync. + if batchCount > 0 { + batchCount-- + } + block, err := FindBlockContainingBatchCount(ctx, deployInfo.Bridge, l1client, config.SnapSyncTest.ParentChainAssertionBlock, batchCount) + if err != nil { + return nil, err + } + firstMessageBlock.SetUint64(block) + } + inboxReader, err := NewInboxReader(inboxTracker, l1client, l1Reader, firstMessageBlock, delayedBridge, sequencerInbox, func() *InboxReaderConfig { return &configFetcher.Get().InboxReader }) if err != nil { return nil, err } @@ -772,6 +796,53 @@ func createNodeImpl( }, nil } +func FindBlockContainingBatchCount(ctx context.Context, bridgeAddress common.Address, l1Client *ethclient.Client, parentChainAssertionBlock uint64, batchCount uint64) (uint64, error) { + bridge, err := bridgegen.NewIBridge(bridgeAddress, l1Client) + if err != nil { + return 0, err + } + high := parentChainAssertionBlock + low := uint64(0) + reduceBy := uint64(100) + if high > reduceBy { + low = high - reduceBy + } + // Reduce high and low by 100 until lowNode.InboxMaxCount < batchCount + // This will give us a range (low to high) of blocks that contain the batch count. + for low > 0 { + lowCount, err := bridge.SequencerMessageCount(&bind.CallOpts{Context: ctx, BlockNumber: new(big.Int).SetUint64(low)}) + if err != nil { + return 0, err + } + if lowCount.Uint64() > batchCount { + high = low + reduceBy = reduceBy * 2 + if low > reduceBy { + low = low - reduceBy + } else { + low = 0 + } + } else { + break + } + } + // Then binary search between low and high to find the block containing the batch count. + for low < high { + mid := low + (high-low)/2 + + midCount, err := bridge.SequencerMessageCount(&bind.CallOpts{Context: ctx, BlockNumber: new(big.Int).SetUint64(mid)}) + if err != nil { + return 0, err + } + if midCount.Uint64() < batchCount { + low = mid + 1 + } else { + high = mid + } + } + return low, nil +} + func (n *Node) OnConfigReload(_ *Config, _ *Config) error { // TODO return nil diff --git a/arbos/addressSet/addressSet.go b/arbos/addressSet/addressSet.go index 4bb87e614d..ccd780aa11 100644 --- a/arbos/addressSet/addressSet.go +++ b/arbos/addressSet/addressSet.go @@ -9,6 +9,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" "github.com/offchainlabs/nitro/arbos/util" @@ -185,7 +186,7 @@ func (as *AddressSet) Remove(addr common.Address, arbosVersion uint64) error { if err != nil { return err } - if arbosVersion >= 11 { + if arbosVersion >= params.ArbosVersion_11 { err = as.byAddress.Set(atSize, util.UintToHash(slot)) if err != nil { return err diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index a3d1ae8386..de1a970b87 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/hashdb" @@ -63,9 +62,6 @@ type ArbosState struct { Burner burn.Burner } -const MaxArbosVersionSupported uint64 = params.ArbosVersion_StylusChargingFixes -const MaxDebugArbosVersionSupported uint64 = 
params.ArbosVersion_StylusChargingFixes - var ErrUninitializedArbOS = errors.New("ArbOS uninitialized") var ErrAlreadyInitialized = errors.New("ArbOS is already initialized") @@ -126,13 +122,13 @@ func NewArbosMemoryBackedArbOSState() (*ArbosState, *state.StateDB) { db := state.NewDatabaseWithConfig(raw, trieConfig) statedb, err := state.New(common.Hash{}, db, nil) if err != nil { - log.Crit("failed to init empty statedb", "error", err) + panic("failed to init empty statedb: " + err.Error()) } burner := burn.NewSystemBurner(nil, false) chainConfig := chaininfo.ArbitrumDevTestChainConfig() newState, err := InitializeArbosState(statedb, burner, chainConfig, arbostypes.TestInitMessage) if err != nil { - log.Crit("failed to open the ArbOS state", "error", err) + panic("failed to open the ArbOS state: " + err.Error()) } return newState, statedb } @@ -142,7 +138,7 @@ func ArbOSVersion(stateDB vm.StateDB) uint64 { backingStorage := storage.NewGeth(stateDB, burn.NewSystemBurner(nil, false)) arbosVersion, err := backingStorage.GetUint64ByUint64(uint64(versionOffset)) if err != nil { - log.Crit("failed to get the ArbOS version", "error", err) + panic("failed to get the ArbOS version: " + err.Error()) } return arbosVersion } @@ -205,7 +201,7 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p _ = sto.SetUint64ByUint64(uint64(versionOffset), 1) // initialize to version 1; upgrade at end of this func if needed _ = sto.SetUint64ByUint64(uint64(upgradeVersionOffset), 0) _ = sto.SetUint64ByUint64(uint64(upgradeTimestampOffset), 0) - if desiredArbosVersion >= 2 { + if desiredArbosVersion >= params.ArbosVersion_2 { _ = sto.SetByUint64(uint64(networkFeeAccountOffset), util.AddressToHash(initialChainOwner)) } else { _ = sto.SetByUint64(uint64(networkFeeAccountOffset), common.Hash{}) // the 0 address until an owner sets it @@ -217,7 +213,7 @@ func InitializeArbosState(stateDB vm.StateDB, burner burn.Burner, chainConfig *p _ = sto.SetUint64ByUint64(uint64(brotliCompressionLevelOffset), 0) // default brotliCompressionLevel for fast compression is 0 initialRewardsRecipient := l1pricing.BatchPosterAddress - if desiredArbosVersion >= 2 { + if desiredArbosVersion >= params.ArbosVersion_2 { initialRewardsRecipient = initialChainOwner } _ = l1pricing.InitializeL1PricingState(sto.OpenCachedSubStorage(l1PricingSubspace), initialRewardsRecipient, initMessage.InitialL1BaseFee) @@ -274,29 +270,29 @@ func (state *ArbosState) UpgradeArbosVersion( nextArbosVersion := state.arbosVersion + 1 switch nextArbosVersion { - case 2: + case params.ArbosVersion_2: ensure(state.l1PricingState.SetLastSurplus(common.Big0, 1)) - case 3: + case params.ArbosVersion_3: ensure(state.l1PricingState.SetPerBatchGasCost(0)) ensure(state.l1PricingState.SetAmortizedCostCapBips(math.MaxUint64)) - case 4: + case params.ArbosVersion_4: // no state changes needed - case 5: + case params.ArbosVersion_5: // no state changes needed - case 6: + case params.ArbosVersion_6: // no state changes needed - case 7: + case params.ArbosVersion_7: // no state changes needed - case 8: + case params.ArbosVersion_8: // no state changes needed - case 9: + case params.ArbosVersion_9: // no state changes needed - case 10: + case params.ArbosVersion_10: ensure(state.l1PricingState.SetL1FeesAvailable(stateDB.GetBalance( l1pricing.L1PricerFundsPoolAddress, ).ToBig())) - case 11: + case params.ArbosVersion_11: // Update the PerBatchGasCost to a more accurate value compared to the old v6 default. 
ensure(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV12)) @@ -316,23 +312,23 @@ func (state *ArbosState) UpgradeArbosVersion( case 12, 13, 14, 15, 16, 17, 18, 19: // these versions are left to Orbit chains for custom upgrades. - case 20: + case params.ArbosVersion_20: // Update Brotli compression level for fast compression from 0 to 1 ensure(state.SetBrotliCompressionLevel(1)) case 21, 22, 23, 24, 25, 26, 27, 28, 29: // these versions are left to Orbit chains for custom upgrades. - case 30: + case params.ArbosVersion_30: programs.Initialize(state.backingStorage.OpenSubStorage(programsSubspace)) - case 31: + case params.ArbosVersion_31: params, err := state.Programs().Params() ensure(err) ensure(params.UpgradeToVersion(2)) ensure(params.Save()) - case 32: + case params.ArbosVersion_32: // no change state needed default: @@ -353,8 +349,8 @@ func (state *ArbosState) UpgradeArbosVersion( state.arbosVersion = nextArbosVersion } - if firstTime && upgradeTo >= 6 { - if upgradeTo < 11 { + if firstTime && upgradeTo >= params.ArbosVersion_6 { + if upgradeTo < params.ArbosVersion_11 { state.Restrict(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV6)) } state.Restrict(state.l1PricingState.SetEquilibrationUnits(l1pricing.InitialEquilibrationUnitsV6)) diff --git a/arbos/arbosState/initialize.go b/arbos/arbosState/initialize.go index 8fd417c2b2..840204382c 100644 --- a/arbos/arbosState/initialize.go +++ b/arbos/arbosState/initialize.go @@ -66,7 +66,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, }() statedb, err := state.New(common.Hash{}, stateDatabase, nil) if err != nil { - log.Crit("failed to init empty statedb", "error", err) + panic("failed to init empty statedb :" + err.Error()) } noStateTrieChangesToCommitError := regexp.MustCompile("^triedb layer .+ is disk layer$") @@ -96,7 +96,7 @@ func InitializeArbosInDatabase(db ethdb.Database, cacheConfig *core.CacheConfig, burner := burn.NewSystemBurner(nil, false) arbosState, err := InitializeArbosState(statedb, burner, chainConfig, initMessage) if err != nil { - log.Crit("failed to open the ArbOS state", "error", err) + panic("failed to open the ArbOS state :" + err.Error()) } chainOwner, err := initData.GetChainOwner() diff --git a/arbos/block_processor.go b/arbos/block_processor.go index 77475856ac..a06034f905 100644 --- a/arbos/block_processor.go +++ b/arbos/block_processor.go @@ -115,11 +115,12 @@ func createNewHeader(prevHeader *types.Header, l1info *L1Info, state *arbosState type ConditionalOptionsForTx []*arbitrum_types.ConditionalOptions type SequencingHooks struct { - TxErrors []error - DiscardInvalidTxsEarly bool - PreTxFilter func(*params.ChainConfig, *types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, *arbitrum_types.ConditionalOptions, common.Address, *L1Info) error - PostTxFilter func(*types.Header, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error - ConditionalOptionsForTx []*arbitrum_types.ConditionalOptions + TxErrors []error // This can be unset + DiscardInvalidTxsEarly bool // This can be unset + PreTxFilter func(*params.ChainConfig, *types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, *arbitrum_types.ConditionalOptions, common.Address, *L1Info) error // This has to be set + PostTxFilter func(*types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error // This has to be set + BlockFilter 
func(*types.Header, *state.StateDB, types.Transactions, types.Receipts) error // This can be unset + ConditionalOptionsForTx []*arbitrum_types.ConditionalOptions // This can be unset } func NoopSequencingHooks() *SequencingHooks { @@ -129,10 +130,11 @@ func NoopSequencingHooks() *SequencingHooks { func(*params.ChainConfig, *types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, *arbitrum_types.ConditionalOptions, common.Address, *L1Info) error { return nil }, - func(*types.Header, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error { + func(*types.Header, *state.StateDB, *arbosState.ArbosState, *types.Transaction, common.Address, uint64, *core.ExecutionResult) error { return nil }, nil, + nil, } } @@ -172,7 +174,7 @@ func ProduceBlockAdvanced( runMode core.MessageRunMode, ) (*types.Block, types.Receipts, error) { - state, err := arbosState.OpenSystemArbosState(statedb, nil, true) + arbState, err := arbosState.OpenSystemArbosState(statedb, nil, true) if err != nil { return nil, nil, err } @@ -189,11 +191,11 @@ func ProduceBlockAdvanced( l1Timestamp: l1Header.Timestamp, } - header := createNewHeader(lastBlockHeader, l1Info, state, chainConfig) + header := createNewHeader(lastBlockHeader, l1Info, arbState, chainConfig) signer := types.MakeSigner(chainConfig, header.Number, header.Time) // Note: blockGasLeft will diverge from the actual gas left during execution in the event of invalid txs, // but it's only used as block-local representation limiting the amount of work done in a block. - blockGasLeft, _ := state.L2PricingState().PerBlockGasLimit() + blockGasLeft, _ := arbState.L2PricingState().PerBlockGasLimit() l1BlockNum := l1Info.l1BlockNumber // Prepend a tx before all others to touch up the state (update the L1 block num, pricing pools, etc) @@ -226,7 +228,7 @@ func ProduceBlockAdvanced( if !ok { return nil, nil, errors.New("retryable tx is somehow not a retryable") } - retryable, _ := state.RetryableState().OpenRetryable(retry.TicketId, time) + retryable, _ := arbState.RetryableState().OpenRetryable(retry.TicketId, time) if retryable == nil { // retryable was already deleted continue @@ -263,22 +265,22 @@ func ProduceBlockAdvanced( return nil, nil, err } - if err = hooks.PreTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info); err != nil { + if err = hooks.PreTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info); err != nil { return nil, nil, err } // Additional pre-transaction validity check - if err = extraPreTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info); err != nil { + if err = extraPreTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info); err != nil { return nil, nil, err } if basefee.Sign() > 0 { dataGas = math.MaxUint64 - brotliCompressionLevel, err := state.BrotliCompressionLevel() + brotliCompressionLevel, err := arbState.BrotliCompressionLevel() if err != nil { return nil, nil, fmt.Errorf("failed to get brotli compression level: %w", err) } - posterCost, _ := state.L1PricingState().GetPosterInfo(tx, poster, brotliCompressionLevel) + posterCost, _ := arbState.L1PricingState().GetPosterInfo(tx, poster, brotliCompressionLevel) posterCostInL2Gas := arbmath.BigDiv(posterCost, basefee) if posterCostInL2Gas.IsUint64() { @@ -322,18 +324,20 @@ func ProduceBlockAdvanced( vm.Config{}, runMode, func(result *core.ExecutionResult) error { - return hooks.PostTxFilter(header, state, tx, sender, dataGas, result) + return 
hooks.PostTxFilter(header, statedb, arbState, tx, sender, dataGas, result) }, ) if err != nil { // Ignore this transaction if it's invalid under the state transition function statedb.RevertToSnapshot(snap) + statedb.ClearTxFilter() return nil, nil, err } // Additional post-transaction validity check - if err = extraPostTxFilter(chainConfig, header, statedb, state, tx, options, sender, l1Info, result); err != nil { + if err = extraPostTxFilter(chainConfig, header, statedb, arbState, tx, options, sender, l1Info, result); err != nil { statedb.RevertToSnapshot(snap) + statedb.ClearTxFilter() return nil, nil, err } @@ -363,13 +367,13 @@ func ProduceBlockAdvanced( if tx.Type() == types.ArbitrumInternalTxType { // ArbOS might have upgraded to a new version, so we need to refresh our state - state, err = arbosState.OpenSystemArbosState(statedb, nil, true) + arbState, err = arbosState.OpenSystemArbosState(statedb, nil, true) if err != nil { return nil, nil, err } // Update the ArbOS version in the header (if it changed) extraInfo := types.DeserializeHeaderExtraInformation(header) - extraInfo.ArbOSFormatVersion = state.ArbOSVersion() + extraInfo.ArbOSFormatVersion = arbState.ArbOSVersion() extraInfo.UpdateHeaderWithInfo(header) } @@ -455,6 +459,16 @@ func ProduceBlockAdvanced( } } + if statedb.IsTxFiltered() { + return nil, nil, state.ErrArbTxFilter + } + + if sequencingHooks.BlockFilter != nil { + if err = sequencingHooks.BlockFilter(header, statedb, complete, receipts); err != nil { + return nil, nil, err + } + } + binary.BigEndian.PutUint64(header.Nonce[:], delayedMessagesRead) FinalizeBlock(header, complete, statedb, chainConfig) diff --git a/arbos/blockhash/blockhash.go b/arbos/blockhash/blockhash.go index ff29bbca9a..df5078fd2c 100644 --- a/arbos/blockhash/blockhash.go +++ b/arbos/blockhash/blockhash.go @@ -8,6 +8,7 @@ import ( "errors" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" ) @@ -56,7 +57,7 @@ func (bh *Blockhashes) RecordNewL1Block(number uint64, blockHash common.Hash, ar // fill in hashes for any "skipped over" blocks nextNumber++ var nextNumBuf [8]byte - if arbosVersion >= 8 { + if arbosVersion >= params.ArbosVersion_8 { binary.LittleEndian.PutUint64(nextNumBuf[:], nextNumber) } diff --git a/arbos/blockhash/blockhash_test.go b/arbos/blockhash/blockhash_test.go index c7cc04d966..8dec2181a3 100644 --- a/arbos/blockhash/blockhash_test.go +++ b/arbos/blockhash/blockhash_test.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/burn" "github.com/offchainlabs/nitro/arbos/storage" @@ -15,7 +16,7 @@ import ( ) func TestBlockhash(t *testing.T) { - arbosVersion := uint64(8) + arbosVersion := params.ArbosVersion_8 sto := storage.NewMemoryBacked(burn.NewSystemBurner(nil, false)) InitializeBlockhashes(sto) diff --git a/arbos/internal_tx.go b/arbos/internal_tx.go index 64dede6290..0ecdfe74cf 100644 --- a/arbos/internal_tx.go +++ b/arbos/internal_tx.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/util" @@ -56,11 +57,11 @@ func ApplyInternalTxUpdate(tx *types.ArbitrumInternalTx, state *arbosState.Arbos l1BlockNumber := util.SafeMapGet[uint64](inputs, 
"l1BlockNumber") timePassed := util.SafeMapGet[uint64](inputs, "timePassed") - if state.ArbOSVersion() < 3 { + if state.ArbOSVersion() < params.ArbosVersion_3 { // (incorrectly) use the L2 block number instead timePassed = util.SafeMapGet[uint64](inputs, "l2BlockNumber") } - if state.ArbOSVersion() < 8 { + if state.ArbOSVersion() < params.ArbosVersion_8 { // in old versions we incorrectly used an L1 block number one too high l1BlockNumber++ } diff --git a/arbos/l1pricing/l1PricingOldVersions.go b/arbos/l1pricing/l1PricingOldVersions.go index 1377351af3..e4cbf5e1b3 100644 --- a/arbos/l1pricing/l1PricingOldVersions.go +++ b/arbos/l1pricing/l1PricingOldVersions.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" am "github.com/offchainlabs/nitro/util/arbmath" @@ -24,7 +25,7 @@ func (ps *L1PricingState) _preversion10_UpdateForBatchPosterSpending( l1Basefee *big.Int, scenario util.TracingScenario, ) error { - if arbosVersion < 2 { + if arbosVersion < params.ArbosVersion_2 { return ps._preVersion2_UpdateForBatchPosterSpending(statedb, evm, updateTime, currentTime, batchPoster, weiSpent, scenario) } @@ -69,7 +70,7 @@ func (ps *L1PricingState) _preversion10_UpdateForBatchPosterSpending( } // impose cap on amortized cost, if there is one - if arbosVersion >= 3 { + if arbosVersion >= params.ArbosVersion_3 { amortizedCostCapBips, err := ps.AmortizedCostCapBips() if err != nil { return err diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 37dae08c33..195df3708c 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "math/big" - "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" @@ -216,7 +215,7 @@ func (ps *L1PricingState) LastSurplus() (*big.Int, error) { } func (ps *L1PricingState) SetLastSurplus(val *big.Int, arbosVersion uint64) error { - if arbosVersion < 7 { + if arbosVersion < params.ArbosVersion_7 { return ps.lastSurplus.Set_preVersion7(val) } return ps.lastSurplus.SetSaturatingWithWarning(val, "L1 pricer last surplus") @@ -309,7 +308,7 @@ func (ps *L1PricingState) UpdateForBatchPosterSpending( l1Basefee *big.Int, scenario util.TracingScenario, ) error { - if arbosVersion < 10 { + if arbosVersion < params.ArbosVersion_10 { return ps._preversion10_UpdateForBatchPosterSpending(statedb, evm, arbosVersion, updateTime, currentTime, batchPoster, weiSpent, l1Basefee, scenario) } @@ -359,7 +358,7 @@ func (ps *L1PricingState) UpdateForBatchPosterSpending( } // impose cap on amortized cost, if there is one - if arbosVersion >= 3 { + if arbosVersion >= params.ArbosVersion_3 { amortizedCostCapBips, err := ps.AmortizedCostCapBips() if err != nil { return err @@ -520,10 +519,13 @@ func (ps *L1PricingState) GetPosterInfo(tx *types.Transaction, poster common.Add if poster != BatchPosterAddress { return common.Big0, 0 } - units := atomic.LoadUint64(&tx.CalldataUnits) - if units == 0 { + var units uint64 + if cachedUnits := tx.GetCachedCalldataUnits(brotliCompressionLevel); cachedUnits != nil { + units = *cachedUnits + } else { + // The cache is empty or invalid, so we need to compute the calldata units units = ps.getPosterUnitsWithoutCache(tx, poster, brotliCompressionLevel) - atomic.StoreUint64(&tx.CalldataUnits, units) + tx.SetCachedCalldataUnits(brotliCompressionLevel, units) } // Approximate the l1 fee charged for posting this tx's 
calldata diff --git a/arbos/programs/api.go b/arbos/programs/api.go index d8f12ffbd3..a622f55397 100644 --- a/arbos/programs/api.go +++ b/arbos/programs/api.go @@ -4,12 +4,13 @@ package programs import ( + "strconv" + "github.com/holiman/uint256" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" @@ -151,7 +152,7 @@ func newApiClosures( case vm.STATICCALL: ret, returnGas, err = evm.StaticCall(scope.Contract, contract, input, gas) default: - log.Crit("unsupported call type", "opcode", opcode) + panic("unsupported call type: " + opcode.String()) } interpreter.SetReturnData(ret) @@ -266,7 +267,7 @@ func newApiClosures( original := input crash := func(reason string) { - log.Crit("bad API call", "reason", reason, "request", req, "len", len(original), "remaining", len(input)) + panic("bad API call reason: " + reason + " request: " + strconv.Itoa(int(req)) + " len: " + strconv.Itoa(len(original)) + " remaining: " + strconv.Itoa(len(input))) } takeInput := func(needed int, reason string) []byte { if len(input) < needed { @@ -338,7 +339,7 @@ func newApiClosures( case StaticCall: opcode = vm.STATICCALL default: - log.Crit("unsupported call type", "opcode", opcode) + panic("unsupported call type opcode: " + opcode.String()) } contract := takeAddress() value := takeU256() @@ -414,8 +415,7 @@ func newApiClosures( captureHostio(name, args, outs, startInk, endInk) return []byte{}, nil, 0 default: - log.Crit("unsupported call type", "req", req) - return []byte{}, nil, 0 + panic("unsupported call type: " + strconv.Itoa(int(req))) } } } diff --git a/arbos/programs/native.go b/arbos/programs/native.go index cfc1170c5b..a996d50d8a 100644 --- a/arbos/programs/native.go +++ b/arbos/programs/native.go @@ -109,7 +109,7 @@ func activateProgramInternal( (*u64)(gasLeft), )) - module, msg, err := status_mod.toResult(output.intoBytes(), debug) + module, msg, err := status_mod.toResult(rustBytesIntoBytes(output), debug) if err != nil { if debug { log.Warn("activation failed", "err", err, "msg", msg, "program", addressForLogging) @@ -119,7 +119,7 @@ func activateProgramInternal( } return nil, nil, err } - hash := moduleHash.toHash() + hash := bytes32ToHash(moduleHash) targets := db.Database().WasmTargets() type result struct { target ethdb.WasmTarget @@ -141,7 +141,7 @@ func activateProgramInternal( goSlice([]byte(target)), output, ) - asm := output.intoBytes() + asm := rustBytesIntoBytes(output) if status_asm != 0 { results <- result{target, nil, fmt.Errorf("%w: %s", ErrProgramActivation, string(asm))} return @@ -279,7 +279,7 @@ func callProgram( )) depth := interpreter.Depth() - data, msg, err := status.toResult(output.intoBytes(), debug) + data, msg, err := status.toResult(rustBytesIntoBytes(output), debug) if status == userFailure && debug { log.Warn("program failure", "err", err, "msg", msg, "program", address, "depth", depth) } @@ -292,7 +292,7 @@ func callProgram( //export handleReqImpl func handleReqImpl(apiId usize, req_type u32, data *rustSlice, costPtr *u64, out_response *C.GoSliceData, out_raw_data *C.GoSliceData) { api := getApi(apiId) - reqData := data.read() + reqData := readRustSlice(data) reqType := RequestType(req_type - EvmApiMethodReqOffset) response, raw_data, cost := api.handler(reqType, reqData) *costPtr = u64(cost) @@ -418,14 +418,14 @@ func SetTarget(name ethdb.WasmTarget, description string, 
native bool) error { cbool(native), )) if status != userSuccess { - msg := arbutil.ToStringOrHex(output.intoBytes()) + msg := arbutil.ToStringOrHex(rustBytesIntoBytes(output)) log.Error("failed to set stylus compilation target", "status", status, "msg", msg) return fmt.Errorf("failed to set stylus compilation target, status %v: %v", status, msg) } return nil } -func (value bytes32) toHash() common.Hash { +func bytes32ToHash(value *bytes32) common.Hash { hash := common.Hash{} for index, b := range value.bytes { hash[index] = byte(b) @@ -449,27 +449,27 @@ func addressToBytes20(addr common.Address) bytes20 { return value } -func (slice *rustSlice) read() []byte { +func readRustSlice(slice *rustSlice) []byte { if slice.len == 0 { return nil } return arbutil.PointerToSlice((*byte)(slice.ptr), int(slice.len)) } -func (vec *rustBytes) read() []byte { +func readRustBytes(vec *rustBytes) []byte { if vec.len == 0 { return nil } return arbutil.PointerToSlice((*byte)(vec.ptr), int(vec.len)) } -func (vec *rustBytes) intoBytes() []byte { - slice := vec.read() - vec.drop() +func rustBytesIntoBytes(vec *rustBytes) []byte { + slice := readRustBytes(vec) + dropRustBytes(vec) return slice } -func (vec *rustBytes) drop() { +func dropRustBytes(vec *rustBytes) { C.free_rust_bytes(*vec) } diff --git a/arbos/programs/native_api.go b/arbos/programs/native_api.go index ab15800ef9..ad8cc0477b 100644 --- a/arbos/programs/native_api.go +++ b/arbos/programs/native_api.go @@ -25,11 +25,11 @@ import "C" import ( "runtime" + "strconv" "sync" "sync/atomic" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/arbutil" @@ -69,11 +69,11 @@ func newApi( func getApi(id usize) NativeApi { any, ok := apiObjects.Load(uintptr(id)) if !ok { - log.Crit("failed to load stylus Go API", "id", id) + panic("failed to load stylus Go API id: " + strconv.Itoa(int(id))) } api, ok := any.(NativeApi) if !ok { - log.Crit("wrong type for stylus Go API", "id", id) + panic("wrong type for stylus Go API id: " + strconv.Itoa(int(id))) } return api } diff --git a/arbos/programs/programs.go b/arbos/programs/programs.go index 06ba6ead8c..524fcc0be7 100644 --- a/arbos/programs/programs.go +++ b/arbos/programs/programs.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" gethParams "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbcompress" @@ -163,6 +164,21 @@ func (p Programs) ActivateProgram(evm *vm.EVM, address common.Address, arbosVers return stylusVersion, codeHash, info.moduleHash, dataFee, false, p.setProgram(codeHash, programData) } +func runModeToString(runmode core.MessageRunMode) string { + switch runmode { + case core.MessageCommitMode: + return "commit_runmode" + case core.MessageGasEstimationMode: + return "gas_estimation_runmode" + case core.MessageEthcallMode: + return "eth_call_runmode" + case core.MessageReplayMode: + return "replay_runmode" + default: + return "unknown_runmode" + } +} + func (p Programs) CallProgram( scope *vm.ScopeContext, statedb vm.StateDB, @@ -219,8 +235,7 @@ func (p Programs) CallProgram( localAsm, err := getLocalAsm(statedb, moduleHash, contract.Address(), contract.Code, contract.CodeHash, params.PageLimit, evm.Context.Time, debugMode, program) if err != nil { - log.Crit("failed to get local wasm for activated program", "program", 
contract.Address()) - return nil, err + panic("failed to get local wasm for activated program: " + contract.Address().Hex()) } evmData := &EvmData{ @@ -250,17 +265,23 @@ func (p Programs) CallProgram( if runmode == core.MessageCommitMode { arbos_tag = statedb.Database().WasmCacheTag() } + + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/program_calls/%s", runModeToString(runmode)), nil).Inc(1) ret, err := callProgram(address, moduleHash, localAsm, scope, interpreter, tracingInfo, calldata, evmData, goParams, model, arbos_tag) if len(ret) > 0 && arbosVersion >= gethParams.ArbosVersion_StylusFixes { // Ensure that return data costs as least as much as it would in the EVM. evmCost := evmMemoryCost(uint64(len(ret))) if startingGas < evmCost { contract.Gas = 0 + // #nosec G115 + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(startingGas)) return nil, vm.ErrOutOfGas } maxGasToReturn := startingGas - evmCost contract.Gas = am.MinInt(contract.Gas, maxGasToReturn) } + // #nosec G115 + metrics.GetOrRegisterCounter(fmt.Sprintf("arb/arbos/stylus/gas_used/%s", runModeToString(runmode)), nil).Inc(int64(startingGas - contract.Gas)) return ret, err } diff --git a/arbos/programs/testcompile.go b/arbos/programs/testcompile.go index 8a4e38444a..58afa228d5 100644 --- a/arbos/programs/testcompile.go +++ b/arbos/programs/testcompile.go @@ -35,10 +35,10 @@ func Wat2Wasm(wat []byte) ([]byte, error) { status := C.wat_to_wasm(goSlice(wat), output) if status != 0 { - return nil, fmt.Errorf("failed reading wat file: %v", string(output.intoBytes())) + return nil, fmt.Errorf("failed reading wat file: %v", string(rustBytesIntoBytes(output))) } - return output.intoBytes(), nil + return rustBytesIntoBytes(output), nil } func testCompileArch(store bool) error { @@ -66,7 +66,7 @@ func testCompileArch(store bool) error { cbool(nativeArm64)) if status != 0 { - return fmt.Errorf("failed setting compilation target arm: %v", string(output.intoBytes())) + return fmt.Errorf("failed setting compilation target arm: %v", string(rustBytesIntoBytes(output))) } status = C.stylus_target_set(goSlice(amd64CompileName), @@ -75,7 +75,7 @@ func testCompileArch(store bool) error { cbool(nativeAmd64)) if status != 0 { - return fmt.Errorf("failed setting compilation target amd: %v", string(output.intoBytes())) + return fmt.Errorf("failed setting compilation target amd: %v", string(rustBytesIntoBytes(output))) } source, err := os.ReadFile("../../arbitrator/stylus/tests/add.wat") @@ -107,7 +107,7 @@ func testCompileArch(store bool) error { output, ) if status == 0 { - return fmt.Errorf("succeeded compiling non-existent arch: %v", string(output.intoBytes())) + return fmt.Errorf("succeeded compiling non-existent arch: %v", string(rustBytesIntoBytes(output))) } status = C.stylus_compile( @@ -118,7 +118,7 @@ func testCompileArch(store bool) error { output, ) if status != 0 { - return fmt.Errorf("failed compiling native: %v", string(output.intoBytes())) + return fmt.Errorf("failed compiling native: %v", string(rustBytesIntoBytes(output))) } if store && !nativeAmd64 && !nativeArm64 { _, err := fmt.Printf("writing host file\n") @@ -126,7 +126,7 @@ func testCompileArch(store bool) error { return err } - err = os.WriteFile("../../target/testdata/host.bin", output.intoBytes(), 0644) + err = os.WriteFile("../../target/testdata/host.bin", rustBytesIntoBytes(output), 0644) if err != nil { return err } @@ -140,7 +140,7 @@ func testCompileArch(store bool) error { output, ) if status 
!= 0 { - return fmt.Errorf("failed compiling arm: %v", string(output.intoBytes())) + return fmt.Errorf("failed compiling arm: %v", string(rustBytesIntoBytes(output))) } if store { _, err := fmt.Printf("writing arm file\n") @@ -148,7 +148,7 @@ func testCompileArch(store bool) error { return err } - err = os.WriteFile("../../target/testdata/arm64.bin", output.intoBytes(), 0644) + err = os.WriteFile("../../target/testdata/arm64.bin", rustBytesIntoBytes(output), 0644) if err != nil { return err } @@ -162,7 +162,7 @@ func testCompileArch(store bool) error { output, ) if status != 0 { - return fmt.Errorf("failed compiling amd: %v", string(output.intoBytes())) + return fmt.Errorf("failed compiling amd: %v", string(rustBytesIntoBytes(output))) } if store { _, err := fmt.Printf("writing amd64 file\n") @@ -170,7 +170,7 @@ func testCompileArch(store bool) error { return err } - err = os.WriteFile("../../target/testdata/amd64.bin", output.intoBytes(), 0644) + err = os.WriteFile("../../target/testdata/amd64.bin", rustBytesIntoBytes(output), 0644) if err != nil { return err } @@ -195,7 +195,7 @@ func resetNativeTarget() error { cbool(true)) if status != 0 { - return fmt.Errorf("failed setting compilation target arm: %v", string(output.intoBytes())) + return fmt.Errorf("failed setting compilation target arm: %v", string(rustBytesIntoBytes(output))) } return nil @@ -260,7 +260,7 @@ func testCompileLoad() error { return err } - _, msg, err := status.toResult(output.intoBytes(), true) + _, msg, err := status.toResult(rustBytesIntoBytes(output), true) if status == userFailure { err = fmt.Errorf("%w: %v", err, msg) } diff --git a/arbos/tx_processor.go b/arbos/tx_processor.go index aec08b15b5..7cebd8da37 100644 --- a/arbos/tx_processor.go +++ b/arbos/tx_processor.go @@ -307,7 +307,7 @@ func (p *TxProcessor) StartTxHook() (endTxNow bool, gasUsed uint64, err error, r // pay for the retryable's gas and update the pools gascost := arbmath.BigMulByUint(effectiveBaseFee, usergas) networkCost := gascost - if p.state.ArbOSVersion() >= 11 { + if p.state.ArbOSVersion() >= params.ArbosVersion_11 { infraFeeAccount, err := p.state.InfraFeeAccount() p.state.Restrict(err) if infraFeeAccount != (common.Address{}) { @@ -576,7 +576,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { takeFunds(maxRefund, arbmath.BigMulByUint(effectiveBaseFee, gasUsed)) // Refund any unused gas, without overdrafting the L1 deposit. 
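The tx_processor.go hunks around this point all gate fee routing on the chain's ArbOS version: from ArbosVersion_11 onward a configured infra fee account takes priority over the network fee account for refunds and fee collection. A hedged sketch of that pattern as a standalone helper (refundRecipient is a hypothetical name; the real code inlines this logic at each call site):

```go
package arbos

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/params"

	"github.com/offchainlabs/nitro/arbos/arbosState"
)

// refundRecipient illustrates the version-gated routing used in EndTxHook:
// prefer the infra fee account once ArbOS 11 is live and an owner has set
// one, otherwise fall back to the network fee account (pre-v11 behavior).
func refundRecipient(state *arbosState.ArbosState) (common.Address, error) {
	if state.ArbOSVersion() >= params.ArbosVersion_11 {
		infra, err := state.InfraFeeAccount()
		if err != nil {
			return common.Address{}, err
		}
		if infra != (common.Address{}) {
			return infra, nil
		}
	}
	return state.NetworkFeeAccount()
}
```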
networkRefund := gasRefund - if p.state.ArbOSVersion() >= 11 { + if p.state.ArbOSVersion() >= params.ArbosVersion_11 { infraFeeAccount, err := p.state.InfraFeeAccount() p.state.Restrict(err) if infraFeeAccount != (common.Address{}) { @@ -629,7 +629,7 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { } purpose := "feeCollection" - if p.state.ArbOSVersion() > 4 { + if p.state.ArbOSVersion() > params.ArbosVersion_4 { infraFeeAccount, err := p.state.InfraFeeAccount() p.state.Restrict(err) if infraFeeAccount != (common.Address{}) { @@ -646,11 +646,11 @@ func (p *TxProcessor) EndTxHook(gasLeft uint64, success bool) { util.MintBalance(&networkFeeAccount, computeCost, p.evm, scenario, purpose) } posterFeeDestination := l1pricing.L1PricerFundsPoolAddress - if p.state.ArbOSVersion() < 2 { + if p.state.ArbOSVersion() < params.ArbosVersion_2 { posterFeeDestination = p.evm.Context.Coinbase } util.MintBalance(&posterFeeDestination, p.PosterFee, p.evm, scenario, purpose) - if p.state.ArbOSVersion() >= 10 { + if p.state.ArbOSVersion() >= params.ArbosVersion_10 { if _, err := p.state.L1PricingState().AddToL1FeesAvailable(p.PosterFee); err != nil { log.Error("failed to update L1FeesAvailable: ", "err", err) } @@ -748,13 +748,13 @@ func (p *TxProcessor) L1BlockHash(blockCtx vm.BlockContext, l1BlockNumber uint64 func (p *TxProcessor) DropTip() bool { version := p.state.ArbOSVersion() - return version != 9 || p.delayedInbox + return version != params.ArbosVersion_9 || p.delayedInbox } func (p *TxProcessor) GetPaidGasPrice() *big.Int { gasPrice := p.evm.GasPrice version := p.state.ArbOSVersion() - if version != 9 { + if version != params.ArbosVersion_9 { // p.evm.Context.BaseFee is already lowered to 0 when vm runs with NoBaseFee flag and 0 gas price gasPrice = p.evm.Context.BaseFee } @@ -762,7 +762,7 @@ func (p *TxProcessor) GetPaidGasPrice() *big.Int { } func (p *TxProcessor) GasPriceOp(evm *vm.EVM) *big.Int { - if p.state.ArbOSVersion() >= 3 { + if p.state.ArbOSVersion() >= params.ArbosVersion_3 { return p.GetPaidGasPrice() } return evm.GasPrice diff --git a/arbos/util/transfer.go b/arbos/util/transfer.go index c5873b7e93..0b61868abe 100644 --- a/arbos/util/transfer.go +++ b/arbos/util/transfer.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -66,7 +67,7 @@ func TransferBalance( if arbmath.BigLessThan(balance.ToBig(), amount) { return fmt.Errorf("%w: addr %v have %v want %v", vm.ErrInsufficientBalance, *from, balance, amount) } - if evm.Context.ArbOSVersion < 30 && amount.Sign() == 0 { + if evm.Context.ArbOSVersion < params.ArbosVersion_30 && amount.Sign() == 0 { evm.StateDB.CreateZombieIfDeleted(*from) } evm.StateDB.SubBalance(*from, uint256.MustFromBig(amount), tracing.BalanceChangeTransfer) diff --git a/arbstate/inbox.go b/arbstate/inbox.go index b58a7420b7..5539a75ce1 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -85,11 +85,11 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash // Matches the way keyset validation was done inside DAS readers i.e logging the error // But other daproviders might just want to return the error if errors.Is(err, daprovider.ErrSeqMsgValidation) && daprovider.IsDASMessageHeaderByte(payload[0]) { - logLevel := log.Error if keysetValidationMode == daprovider.KeysetPanicIfInvalid { - logLevel = log.Crit + panic(err.Error()) 
+ } else { + log.Error(err.Error()) } - logLevel(err.Error()) } else { return nil, err } diff --git a/cmd/conf/database.go b/cmd/conf/database.go index 8857b615f3..8d05c44500 100644 --- a/cmd/conf/database.go +++ b/cmd/conf/database.go @@ -112,16 +112,19 @@ func (c *PersistentConfig) Validate() error { } type PebbleConfig struct { + SyncMode bool `koanf:"sync-mode"` MaxConcurrentCompactions int `koanf:"max-concurrent-compactions"` Experimental PebbleExperimentalConfig `koanf:"experimental"` } var PebbleConfigDefault = PebbleConfig{ + SyncMode: false, // use NO-SYNC mode, see: https://github.com/ethereum/go-ethereum/issues/29819 MaxConcurrentCompactions: runtime.NumCPU(), Experimental: PebbleExperimentalConfigDefault, } func PebbleConfigAddOptions(prefix string, f *flag.FlagSet, defaultConfig *PebbleConfig) { + f.Bool(prefix+".sync-mode", defaultConfig.SyncMode, "if true sync mode is used (data needs to be written to WAL before the write is marked as completed)") f.Int(prefix+".max-concurrent-compactions", defaultConfig.MaxConcurrentCompactions, "maximum number of concurrent compactions") PebbleExperimentalConfigAddOptions(prefix+".experimental", f, &defaultConfig.Experimental) } @@ -180,7 +183,7 @@ var PebbleExperimentalConfigDefault = PebbleExperimentalConfig{ BlockSize: 4 << 10, // 4 KB IndexBlockSize: 4 << 10, // 4 KB TargetFileSize: 2 << 20, // 2 MB - TargetFileSizeEqualLevels: true, + TargetFileSizeEqualLevels: false, L0CompactionConcurrency: 10, CompactionDebtConcurrency: 1 << 30, // 1GB @@ -251,6 +254,7 @@ func (c *PebbleConfig) ExtraOptions(namespace string) *pebble.ExtraOptions { walDir = path.Join(walDir, namespace) } return &pebble.ExtraOptions{ + SyncMode: c.SyncMode, BytesPerSync: c.Experimental.BytesPerSync, L0CompactionFileThreshold: c.Experimental.L0CompactionFileThreshold, L0CompactionThreshold: c.Experimental.L0CompactionThreshold, diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 06f94dc952..67998880e0 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -92,6 +92,7 @@ type ClientStoreConfig struct { SigningWallet string `koanf:"signing-wallet"` SigningWalletPassword string `koanf:"signing-wallet-password"` MaxStoreChunkBodySize int `koanf:"max-store-chunk-body-size"` + EnableChunkedStore bool `koanf:"enable-chunked-store"` } func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) { @@ -104,6 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) { f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password") f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.") f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request") + f.Bool("enable-chunked-store", true, "enable data to be sent to DAS in chunks instead of all at once") k, err := confighelpers.BeginCommonParse(f, args) if err != nil { @@ -152,7 +154,7 @@ func startClientStore(args []string) error { } } - client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize) + client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.EnableChunkedStore) if err != nil { return err } diff --git a/cmd/nitro/init.go b/cmd/nitro/init.go index acad672bb0..93c51a0040 100644 --- a/cmd/nitro/init.go +++ b/cmd/nitro/init.go @@ -335,12 +335,12 @@ func validateBlockChain(blockChain *core.BlockChain, chainConfig 
*params.ChainCo } // Make sure we don't allow accidentally downgrading ArbOS if chainConfig.DebugMode() { - if currentArbosState.ArbOSVersion() > arbosState.MaxDebugArbosVersionSupported { - return fmt.Errorf("attempted to launch node in debug mode with ArbOS version %v on ArbOS state with version %v", arbosState.MaxDebugArbosVersionSupported, currentArbosState.ArbOSVersion()) + if currentArbosState.ArbOSVersion() > params.MaxDebugArbosVersionSupported { + return fmt.Errorf("attempted to launch node in debug mode with ArbOS version %v on ArbOS state with version %v", params.MaxDebugArbosVersionSupported, currentArbosState.ArbOSVersion()) } } else { - if currentArbosState.ArbOSVersion() > arbosState.MaxArbosVersionSupported { - return fmt.Errorf("attempted to launch node with ArbOS version %v on ArbOS state with version %v", arbosState.MaxArbosVersionSupported, currentArbosState.ArbOSVersion()) + if currentArbosState.ArbOSVersion() > params.MaxArbosVersionSupported { + return fmt.Errorf("attempted to launch node with ArbOS version %v on ArbOS state with version %v", params.MaxArbosVersionSupported, currentArbosState.ArbOSVersion()) } } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 3fc042a799..e4e1b79353 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -231,10 +231,6 @@ func mainImpl() int { nodeConfig.Node.ParentChainReader.Enable = true } - if nodeConfig.Execution.Sequencer.Enable && nodeConfig.Node.ParentChainReader.Enable && nodeConfig.Node.InboxReader.HardReorg { - flag.Usage() - log.Crit("hard reorgs cannot safely be enabled with sequencer mode enabled") - } if nodeConfig.Execution.Sequencer.Enable != nodeConfig.Node.Sequencer { log.Error("consensus and execution must agree if sequencing is enabled or not", "Execution.Sequencer.Enable", nodeConfig.Execution.Sequencer.Enable, "Node.Sequencer", nodeConfig.Node.Sequencer) } diff --git a/contracts b/contracts index b140ed63ac..763bd77906 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit b140ed63acdb53cb906ffd1fa3c36fdbd474364e +Subproject commit 763bd77906b7677da691eaa31c6e195d455197a4 diff --git a/das/aggregator.go b/das/aggregator.go index 85fccb078f..4b1653a687 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -41,12 +41,14 @@ type AggregatorConfig struct { AssumedHonest int `koanf:"assumed-honest"` Backends BackendConfigList `koanf:"backends"` MaxStoreChunkBodySize int `koanf:"max-store-chunk-body-size"` + EnableChunkedStore bool `koanf:"enable-chunked-store"` } var DefaultAggregatorConfig = AggregatorConfig{ AssumedHonest: 0, Backends: nil, MaxStoreChunkBodySize: 512 * 1024, + EnableChunkedStore: true, } var parsedBackendsConf BackendConfigList @@ -56,6 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.") f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. 
This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.") f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers") + f.Bool(prefix+".enable-chunked-store", DefaultAggregatorConfig.EnableChunkedStore, "enable data to be sent to DAS in chunks instead of all at once") } type Aggregator struct { diff --git a/das/aggregator_test.go b/das/aggregator_test.go index 1afb70e5f8..f8ccc187a8 100644 --- a/das/aggregator_test.go +++ b/das/aggregator_test.go @@ -57,7 +57,7 @@ func TestDAS_BasicAggregationLocal(t *testing.T) { backends = append(backends, *details) } - aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{RPCAggregator: AggregatorConfig{AssumedHonest: 1}, ParentChainNodeURL: "none"}, backends) + aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{RPCAggregator: AggregatorConfig{AssumedHonest: 1, EnableChunkedStore: true}, ParentChainNodeURL: "none"}, backends) Require(t, err) rawMsg := []byte("It's time for you to see the fnords.") @@ -214,7 +214,7 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) { aggregator, err := NewAggregator( ctx, DataAvailabilityConfig{ - RPCAggregator: AggregatorConfig{AssumedHonest: assumedHonest}, + RPCAggregator: AggregatorConfig{AssumedHonest: assumedHonest, EnableChunkedStore: true}, ParentChainNodeURL: "none", RequestTimeout: time.Millisecond * 2000, }, backends) diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go index 3ea6c4e2c6..5d4ca0dc93 100644 --- a/das/dasRpcClient.go +++ b/das/dasRpcClient.go @@ -35,10 +35,11 @@ var ( ) type DASRPCClient struct { // implements DataAvailabilityService - clnt *rpc.Client - url string - signer signature.DataSignerFunc - chunkSize uint64 + clnt *rpc.Client + url string + signer signature.DataSignerFunc + chunkSize uint64 + enableChunkedStore bool } func nilSigner(_ []byte) ([]byte, error) { @@ -47,7 +48,7 @@ func nilSigner(_ []byte) ([]byte, error) { const sendChunkJSONBoilerplate = "{\"jsonrpc\":\"2.0\",\"id\":4294967295,\"method\":\"das_sendChunked\",\"params\":[\"\"]}" -func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int) (*DASRPCClient, error) { +func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, enableChunkedStore bool) (*DASRPCClient, error) { clnt, err := rpc.Dial(target) if err != nil { return nil, err @@ -56,18 +57,23 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu signer = nilSigner } + client := &DASRPCClient{ + clnt: clnt, + url: target, + signer: signer, + enableChunkedStore: enableChunkedStore, + } + // Byte arrays are encoded in base64 - chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2 - if chunkSize <= 0 { - return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize) + if enableChunkedStore { + chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2 + if chunkSize <= 0 { + return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize) + } + client.chunkSize = uint64(chunkSize) } - return &DASRPCClient{ - clnt: clnt, - url: target, - signer: signer, - chunkSize: 
uint64(chunkSize), - }, nil + return client, nil } func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) { @@ -83,6 +89,11 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64 rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds()) }() + if !c.enableChunkedStore { + log.Debug("Legacy store is being force-used by the DAS client", "url", c.url) + return c.legacyStore(ctx, message, timeout) + } + // #nosec G115 timestamp := uint64(start.Unix()) nChunks := uint64(len(message)) / c.chunkSize diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go index 916637aac6..1c9e2eecab 100644 --- a/das/rpc_aggregator.go +++ b/das/rpc_aggregator.go @@ -110,7 +110,7 @@ func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([] } metricName := metricsutil.CanonicalizeMetricName(url.Hostname()) - service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize) + service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.EnableChunkedStore) if err != nil { return nil, err } diff --git a/das/rpc_test.go b/das/rpc_test.go index ebc4b736d5..c4ee71aa4f 100644 --- a/das/rpc_test.go +++ b/das/rpc_test.go @@ -84,6 +84,7 @@ func testRpcImpl(t *testing.T, size, times int, concurrent bool) { AssumedHonest: 1, Backends: beConfigs, MaxStoreChunkBodySize: (chunkSize * 2) + len(sendChunkJSONBoilerplate), + EnableChunkedStore: true, }, RequestTimeout: time.Minute, } diff --git a/execution/gethexec/api.go b/execution/gethexec/api.go index 713d1496f9..699aa081b5 100644 --- a/execution/gethexec/api.go +++ b/execution/gethexec/api.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbos/arbosState" @@ -285,14 +286,16 @@ func stateAndHeader(blockchain *core.BlockChain, block uint64) (*arbosState.Arbo type ArbTraceForwarderAPI struct { fallbackClientUrl string fallbackClientTimeout time.Duration + blockchainConfig *params.ChainConfig initialized atomic.Bool mutex sync.Mutex fallbackClient types.FallbackClient } -func NewArbTraceForwarderAPI(fallbackClientUrl string, fallbackClientTimeout time.Duration) *ArbTraceForwarderAPI { +func NewArbTraceForwarderAPI(blockchainConfig *params.ChainConfig, fallbackClientUrl string, fallbackClientTimeout time.Duration) *ArbTraceForwarderAPI { return &ArbTraceForwarderAPI{ + blockchainConfig: blockchainConfig, fallbackClientUrl: fallbackClientUrl, fallbackClientTimeout: fallbackClientTimeout, } @@ -332,16 +335,45 @@ func (api *ArbTraceForwarderAPI) forward(ctx context.Context, method string, arg return resp, nil } -func (api *ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMessage, traceTypes json.RawMessage, blockNum json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_call", callArgs, traceTypes, blockNum) +func (api *ArbTraceForwarderAPI) blockSupportedByClassicNode(blockNumOrHash json.RawMessage) error { + var bnh rpc.BlockNumberOrHash + err := bnh.UnmarshalJSON(blockNumOrHash) + if err != nil { + return err + } + blockNum, isNum := bnh.Number() + if !isNum { + return nil + } + // #nosec G115 + if blockNum < 0 || blockNum > rpc.BlockNumber(api.blockchainConfig.ArbitrumChainParams.GenesisBlockNum) { + return fmt.Errorf("block number %v is not supported by 
classic node", blockNum) + } + return nil } -func (api *ArbTraceForwarderAPI) CallMany(ctx context.Context, calls json.RawMessage, blockNum json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_callMany", calls, blockNum) +func (api *ArbTraceForwarderAPI) Call(ctx context.Context, callArgs json.RawMessage, traceTypes json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { + err := api.blockSupportedByClassicNode(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_call", callArgs, traceTypes, blockNumOrHash) } -func (api *ArbTraceForwarderAPI) ReplayBlockTransactions(ctx context.Context, blockNum json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_replayBlockTransactions", blockNum, traceTypes) +func (api *ArbTraceForwarderAPI) CallMany(ctx context.Context, calls json.RawMessage, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { + err := api.blockSupportedByClassicNode(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_callMany", calls, blockNumOrHash) +} + +func (api *ArbTraceForwarderAPI) ReplayBlockTransactions(ctx context.Context, blockNumOrHash json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { + err := api.blockSupportedByClassicNode(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_replayBlockTransactions", blockNumOrHash, traceTypes) } func (api *ArbTraceForwarderAPI) ReplayTransaction(ctx context.Context, txHash json.RawMessage, traceTypes json.RawMessage) (*json.RawMessage, error) { @@ -356,8 +388,12 @@ func (api *ArbTraceForwarderAPI) Get(ctx context.Context, txHash json.RawMessage return api.forward(ctx, "arbtrace_get", txHash, path) } -func (api *ArbTraceForwarderAPI) Block(ctx context.Context, blockNum json.RawMessage) (*json.RawMessage, error) { - return api.forward(ctx, "arbtrace_block", blockNum) +func (api *ArbTraceForwarderAPI) Block(ctx context.Context, blockNumOrHash json.RawMessage) (*json.RawMessage, error) { + err := api.blockSupportedByClassicNode(blockNumOrHash) + if err != nil { + return nil, err + } + return api.forward(ctx, "arbtrace_block", blockNumOrHash) } func (api *ArbTraceForwarderAPI) Filter(ctx context.Context, filter json.RawMessage) (*json.RawMessage, error) { diff --git a/execution/gethexec/executionengine.go b/execution/gethexec/executionengine.go index 69535e82be..ffc6ceee9f 100644 --- a/execution/gethexec/executionengine.go +++ b/execution/gethexec/executionengine.go @@ -789,7 +789,8 @@ func (s *ExecutionEngine) cacheL1PriceDataOfMsg(seqNum arbutil.MessageIndex, rec gasUsedForL1 += receipts[i].GasUsedForL1 } for _, tx := range block.Transactions() { - callDataUnits += tx.CalldataUnits + _, cachedUnits := tx.GetRawCachedCalldataUnits() + callDataUnits += cachedUnits } } l1GasCharged := gasUsedForL1 * block.BaseFee().Uint64() diff --git a/execution/gethexec/node.go b/execution/gethexec/node.go index 11d173a21e..16e4948723 100644 --- a/execution/gethexec/node.go +++ b/execution/gethexec/node.go @@ -284,6 +284,7 @@ func CreateExecutionNode( Namespace: "arbtrace", Version: "1.0", Service: NewArbTraceForwarderAPI( + l2BlockChain.Config(), config.RPC.ClassicRedirect, config.RPC.ClassicRedirectTimeout, ), diff --git a/execution/gethexec/sequencer.go b/execution/gethexec/sequencer.go index 92d440e8cb..faded7375c 100644 --- a/execution/gethexec/sequencer.go +++ b/execution/gethexec/sequencer.go @@ 
-490,7 +490,10 @@ func (s *Sequencer) preTxFilter(_ *params.ChainConfig, header *types.Header, sta return nil } -func (s *Sequencer) postTxFilter(header *types.Header, _ *arbosState.ArbosState, tx *types.Transaction, sender common.Address, dataGas uint64, result *core.ExecutionResult) error { +func (s *Sequencer) postTxFilter(header *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, sender common.Address, dataGas uint64, result *core.ExecutionResult) error { + if statedb.IsTxFiltered() { + return state.ErrArbTxFilter + } if result.Err != nil && result.UsedGas > dataGas && result.UsedGas-dataGas <= s.config().MaxRevertGasReject { return arbitrum.NewRevertReason(result) } diff --git a/go-ethereum b/go-ethereum index 4f47f4c6ea..26b4dff616 160000 --- a/go-ethereum +++ b/go-ethereum @@ -1 +1 @@ -Subproject commit 4f47f4c6eafd81290d51a7f11fbd99bc2fc3c5a6 +Subproject commit 26b4dff6165650b6963fb1b6f88958c29c059214 diff --git a/precompiles/ArbGasInfo.go b/precompiles/ArbGasInfo.go index 8d916926f3..c85ed93f39 100644 --- a/precompiles/ArbGasInfo.go +++ b/precompiles/ArbGasInfo.go @@ -29,7 +29,7 @@ func (con ArbGasInfo) GetPricesInWeiWithAggregator( evm mech, aggregator addr, ) (huge, huge, huge, huge, huge, huge, error) { - if c.State.ArbOSVersion() < 4 { + if c.State.ArbOSVersion() < params.ArbosVersion_4 { return con._preVersion4_GetPricesInWeiWithAggregator(c, evm, aggregator) } @@ -105,7 +105,7 @@ func (con ArbGasInfo) GetPricesInWei(c ctx, evm mech) (huge, huge, huge, huge, h // GetPricesInArbGasWithAggregator gets prices in ArbGas when using the provided aggregator func (con ArbGasInfo) GetPricesInArbGasWithAggregator(c ctx, evm mech, aggregator addr) (huge, huge, huge, error) { - if c.State.ArbOSVersion() < 4 { + if c.State.ArbOSVersion() < params.ArbosVersion_4 { return con._preVersion4_GetPricesInArbGasWithAggregator(c, evm, aggregator) } l1GasPrice, err := c.State.L1PricingState().PricePerUnit() @@ -220,7 +220,7 @@ func (con ArbGasInfo) GetGasBacklogTolerance(c ctx, evm mech) (uint64, error) { // GetL1PricingSurplus gets the surplus of funds for L1 batch posting payments (may be negative) func (con ArbGasInfo) GetL1PricingSurplus(c ctx, evm mech) (*big.Int, error) { - if c.State.ArbOSVersion() < 10 { + if c.State.ArbOSVersion() < params.ArbosVersion_10 { return con._preversion10_GetL1PricingSurplus(c, evm) } ps := c.State.L1PricingState() diff --git a/precompiles/ArbOwnerPublic.go b/precompiles/ArbOwnerPublic.go index 451e18e1cc..792b4bb59d 100644 --- a/precompiles/ArbOwnerPublic.go +++ b/precompiles/ArbOwnerPublic.go @@ -5,6 +5,7 @@ package precompiles import ( "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" ) // ArbOwnerPublic precompile provides non-owners with info about the current chain owners. 
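The sequencer change above is one half of the new transaction-filter plumbing; the other half is the BlockFilter hook added to SequencingHooks earlier in this diff. A sketch of wiring a custom hook, assuming IsTxFiltered and ErrArbTxFilter live in the forked core/state package as the diff suggests (the per-block transaction bound is invented for illustration):

```go
package main

import (
	"errors"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"

	"github.com/offchainlabs/nitro/arbos"
)

func newFilteringHooks() *arbos.SequencingHooks {
	hooks := arbos.NoopSequencingHooks()
	hooks.BlockFilter = func(header *types.Header, statedb *state.StateDB, txs types.Transactions, receipts types.Receipts) error {
		// Mirror the check ProduceBlockAdvanced already performs: refuse to
		// seal a block whose state tripped the transaction filter.
		if statedb.IsTxFiltered() {
			return state.ErrArbTxFilter
		}
		// Invented local policy: bound the number of transactions per block.
		if len(txs) > 1000 {
			return errors.New("too many transactions in locally sealed block")
		}
		return nil
	}
	return hooks
}
```

Returning a non-nil error from the hook makes ProduceBlockAdvanced abandon the block before finalization, so a sequencer can veto whole blocks, not just individual transactions.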
@@ -42,7 +43,7 @@ func (con ArbOwnerPublic) GetNetworkFeeAccount(c ctx, evm mech) (addr, error) { // GetInfraFeeAccount gets the infrastructure fee collector func (con ArbOwnerPublic) GetInfraFeeAccount(c ctx, evm mech) (addr, error) { - if c.State.ArbOSVersion() < 6 { + if c.State.ArbOSVersion() < params.ArbosVersion_6 { return c.State.NetworkFeeAccount() } return c.State.InfraFeeAccount() diff --git a/precompiles/ArbOwner_test.go b/precompiles/ArbOwner_test.go index 51b2fc0cd9..74b29a79b5 100644 --- a/precompiles/ArbOwner_test.go +++ b/precompiles/ArbOwner_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/burn" @@ -218,7 +219,7 @@ func TestArbInfraFeeAccount(t *testing.T) { err = prec.SetInfraFeeAccount(callCtx, evm, newAddr) // this should be a no-op (because ArbOS version 0) Require(t, err) - version5 := uint64(5) + version5 := params.ArbosVersion_5 evm = newMockEVMForTestingWithVersion(&version5) callCtx = testContext(caller, evm) prec = &ArbOwner{} diff --git a/precompiles/ArbRetryableTx.go b/precompiles/ArbRetryableTx.go index 8fb5aa9391..06e5ccd352 100644 --- a/precompiles/ArbRetryableTx.go +++ b/precompiles/ArbRetryableTx.go @@ -39,7 +39,7 @@ type ArbRetryableTx struct { var ErrSelfModifyingRetryable = errors.New("retryable cannot modify itself") func (con ArbRetryableTx) oldNotFoundError(c ctx) error { - if c.State.ArbOSVersion() >= 3 { + if c.State.ArbOSVersion() >= params.ArbosVersion_3 { return con.NoTicketWithIDError() } return errors.New("ticketId not found") diff --git a/precompiles/ArbSys.go b/precompiles/ArbSys.go index 04cde46ebe..9742ed51f4 100644 --- a/precompiles/ArbSys.go +++ b/precompiles/ArbSys.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/util" "github.com/offchainlabs/nitro/util/arbmath" @@ -37,7 +38,7 @@ func (con *ArbSys) ArbBlockNumber(c ctx, evm mech) (huge, error) { // ArbBlockHash gets the L2 block hash, if sufficiently recent func (con *ArbSys) ArbBlockHash(c ctx, evm mech, arbBlockNumber *big.Int) (bytes32, error) { if !arbBlockNumber.IsUint64() { - if c.State.ArbOSVersion() >= 11 { + if c.State.ArbOSVersion() >= params.ArbosVersion_11 { return bytes32{}, con.InvalidBlockNumberError(arbBlockNumber, evm.Context.BlockNumber) } return bytes32{}, errors.New("invalid block number") @@ -46,7 +47,7 @@ func (con *ArbSys) ArbBlockHash(c ctx, evm mech, arbBlockNumber *big.Int) (bytes currentNumber := evm.Context.BlockNumber.Uint64() if requestedBlockNum >= currentNumber || requestedBlockNum+256 < currentNumber { - if c.State.ArbOSVersion() >= 11 { + if c.State.ArbOSVersion() >= params.ArbosVersion_11 { return common.Hash{}, con.InvalidBlockNumberError(arbBlockNumber, evm.Context.BlockNumber) } return common.Hash{}, errors.New("invalid block number for ArbBlockHAsh") @@ -84,7 +85,7 @@ func (con *ArbSys) MapL1SenderContractAddressToL2Alias(c ctx, sender addr, dest // WasMyCallersAddressAliased checks if the caller's caller was aliased func (con *ArbSys) WasMyCallersAddressAliased(c ctx, evm mech) (bool, error) { topLevel := con.isTopLevel(c, evm) - if c.State.ArbOSVersion() < 6 { + if c.State.ArbOSVersion() < params.ArbosVersion_6 { topLevel = evm.Depth() == 2 } aliased := topLevel 
&& util.DoesTxTypeAlias(c.txProcessor.TopTxType) @@ -180,7 +181,7 @@ func (con *ArbSys) SendTxToL1(c ctx, evm mech, value huge, destination addr, cal calldataForL1, ) - if c.State.ArbOSVersion() >= 4 { + if c.State.ArbOSVersion() >= params.ArbosVersion_4 { return leafNum, nil } return sendHash.Big(), err diff --git a/precompiles/context.go b/precompiles/context.go index 670ffa7443..86e56ffbff 100644 --- a/precompiles/context.go +++ b/precompiles/context.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/arbosState" @@ -58,7 +57,7 @@ func (c *Context) GasLeft() *uint64 { } func (c *Context) Restrict(err error) { - log.Crit("A metered burner was used for access-controlled work", "error", err) + panic("A metered burner was used for access-controlled work: " + err.Error()) } func (c *Context) HandleError(err error) error { @@ -88,13 +87,13 @@ func testContext(caller addr, evm mech) *Context { } state, err := arbosState.OpenArbosState(evm.StateDB, burn.NewSystemBurner(tracingInfo, false)) if err != nil { - log.Crit("unable to open arbos state", "error", err) + panic("unable to open arbos state: " + err.Error()) } ctx.State = state var ok bool ctx.txProcessor, ok = evm.ProcessingHook.(*arbos.TxProcessor) if !ok { - log.Crit("must have tx processor") + panic("must have tx processor") } return ctx } diff --git a/precompiles/precompile.go b/precompiles/precompile.go index 5b5376a4ca..7ca9d409c6 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -120,7 +120,7 @@ func (e *SolError) Error() string { func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Precompile) { source, err := abi.JSON(strings.NewReader(metadata.ABI)) if err != nil { - log.Crit("Bad ABI") + panic("Bad ABI") } implementerType := reflect.TypeOf(implementer) @@ -128,12 +128,12 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr _, ok := implementerType.Elem().FieldByName("Address") if !ok { - log.Crit("Implementer for precompile ", contract, " is missing an Address field") + panic("Implementer for precompile " + contract + " is missing an Address field") } address, ok := reflect.ValueOf(implementer).Elem().FieldByName("Address").Interface().(addr) if !ok { - log.Crit("Implementer for precompile ", contract, "'s Address field has the wrong type") + panic("Implementer for precompile " + contract + "'s Address field has the wrong type") } gethAbiFuncTypeEquality := func(actual, geth reflect.Type) bool { @@ -167,7 +167,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr name = capitalize + name[1:] if len(method.ID) != 4 { - log.Crit("Method ID isn't 4 bytes") + panic("Method ID isn't 4 bytes") } id := *(*[4]byte)(method.ID) @@ -175,7 +175,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr handler, ok := implementerType.MethodByName(name) if !ok { - log.Crit("Precompile " + contract + " must implement " + name) + panic("Precompile " + contract + " must implement " + name) } var needs = []reflect.Type{ @@ -199,7 +199,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr needs = append(needs, reflect.TypeOf(&big.Int{})) purity = payable default: - log.Crit("Unknown state mutability ", method.StateMutability) + panic("Unknown state mutability " +
method.StateMutability) } for _, arg := range method.Inputs { @@ -215,10 +215,9 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr expectedHandlerType := reflect.FuncOf(needs, outputs, false) if !gethAbiFuncTypeEquality(handler.Type, expectedHandlerType) { - log.Crit( - "Precompile "+contract+"'s "+name+"'s implementer has the wrong type\n", - "\texpected:\t", expectedHandlerType, "\n\tbut have:\t", handler.Type, - ) + panic( + "Precompile " + contract + "'s " + name + "'s implementer has the wrong type\n" + + "\texpected:\t" + expectedHandlerType.String() + "\n\tbut have:\t" + handler.Type.String()) } method := PrecompileMethod{ @@ -237,7 +236,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr method := implementerType.Method(i) name := method.Name if method.IsExported() && methodsByName[name] == nil { - log.Crit(contract + " is missing a solidity interface for " + name) + panic(contract + " is missing a solidity interface for " + name) } } @@ -269,11 +268,10 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr if arg.Indexed { _, ok := supportedIndices[arg.Type.String()] if !ok { - log.Crit( - "Please change the solidity for precompile ", contract, - "'s event ", name, ":\n\tEvent indices of type ", - arg.Type.String(), " are not supported", - ) + panic( + "Please change the solidity for precompile " + contract + + "'s event " + name + ":\n\tEvent indices of type " + + arg.Type.String() + " are not supported") } } } @@ -288,23 +286,21 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr field, ok := implementerType.Elem().FieldByName(name) if !ok { - log.Crit(missing, "event ", name, " of type\n\t", expectedFieldType) + panic(missing + "event " + name + " of type\n\t" + expectedFieldType.String()) } costField, ok := implementerType.Elem().FieldByName(name + "GasCost") if !ok { - log.Crit(missing, "event ", name, "'s GasCost of type\n\t", expectedCostType) + panic(missing + "event " + name + "'s GasCost of type\n\t" + expectedCostType.String()) } if !gethAbiFuncTypeEquality(field.Type, expectedFieldType) { - log.Crit( - context, "'s field for event ", name, " has the wrong type\n", - "\texpected:\t", expectedFieldType, "\n\tbut have:\t", field.Type, - ) + panic( + context + "'s field for event " + name + " has the wrong type\n" + + "\texpected:\t" + expectedFieldType.String() + "\n\tbut have:\t" + field.Type.String()) } if !gethAbiFuncTypeEquality(costField.Type, expectedCostType) { - log.Crit( - context, "'s field for event ", name, "GasCost has the wrong type\n", - "\texpected:\t", expectedCostType, "\n\tbut have:\t", costField.Type, - ) + panic( + context + "'s field for event " + name + "GasCost has the wrong type\n" + + "\texpected:\t" + expectedCostType.String() + "\n\tbut have:\t" + costField.Type.String()) } structFields := reflect.ValueOf(implementer).Elem() @@ -361,7 +357,7 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr args = args[2:] version := arbosState.ArbOSVersion(state) - if callerCtx.readOnly && version >= 11 { + if callerCtx.readOnly && version >= params.ArbosVersion_11 { return []reflect.Value{reflect.ValueOf(vm.ErrWriteProtection)} } @@ -464,13 +460,12 @@ func MakePrecompile(metadata *bind.MetaData, implementer interface{}) (addr, *Pr field, ok := implementerType.Elem().FieldByName(name + "Error") if !ok { - log.Crit(missing, "custom error ", name, "Error of type\n\t", expectedFieldType) + panic(missing 
+ "custom error " + name + "Error of type\n\t" + expectedFieldType.String()) } if field.Type != expectedFieldType { - log.Crit( - context, "'s field for error ", name, "Error has the wrong type\n", - "\texpected:\t", expectedFieldType, "\n\tbut have:\t", field.Type, - ) + panic( + context + "'s field for error " + name + "Error has the wrong type\n" + + "\texpected:\t" + expectedFieldType.String() + "\n\tbut have:\t" + field.Type.String()) } structFields := reflect.ValueOf(implementer).Elem() @@ -531,14 +526,14 @@ func Precompiles() map[addr]ArbosPrecompile { insert(MakePrecompile(pgen.ArbFunctionTableMetaData, &ArbFunctionTable{Address: types.ArbFunctionTableAddress})) insert(MakePrecompile(pgen.ArbosTestMetaData, &ArbosTest{Address: types.ArbosTestAddress})) ArbGasInfo := insert(MakePrecompile(pgen.ArbGasInfoMetaData, &ArbGasInfo{Address: types.ArbGasInfoAddress})) - ArbGasInfo.methodsByName["GetL1FeesAvailable"].arbosVersion = 10 - ArbGasInfo.methodsByName["GetL1RewardRate"].arbosVersion = 11 - ArbGasInfo.methodsByName["GetL1RewardRecipient"].arbosVersion = 11 - ArbGasInfo.methodsByName["GetL1PricingEquilibrationUnits"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetLastL1PricingUpdateTime"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetL1PricingFundsDueForRewards"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetL1PricingUnitsSinceUpdate"].arbosVersion = 20 - ArbGasInfo.methodsByName["GetLastL1PricingSurplus"].arbosVersion = 20 + ArbGasInfo.methodsByName["GetL1FeesAvailable"].arbosVersion = params.ArbosVersion_10 + ArbGasInfo.methodsByName["GetL1RewardRate"].arbosVersion = params.ArbosVersion_11 + ArbGasInfo.methodsByName["GetL1RewardRecipient"].arbosVersion = params.ArbosVersion_11 + ArbGasInfo.methodsByName["GetL1PricingEquilibrationUnits"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetLastL1PricingUpdateTime"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetL1PricingFundsDueForRewards"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetL1PricingUnitsSinceUpdate"].arbosVersion = params.ArbosVersion_20 + ArbGasInfo.methodsByName["GetLastL1PricingSurplus"].arbosVersion = params.ArbosVersion_20 insert(MakePrecompile(pgen.ArbAggregatorMetaData, &ArbAggregator{Address: types.ArbAggregatorAddress})) insert(MakePrecompile(pgen.ArbStatisticsMetaData, &ArbStatistics{Address: types.ArbStatisticsAddress})) @@ -554,10 +549,10 @@ func Precompiles() map[addr]ArbosPrecompile { ArbOwnerPublicImpl := &ArbOwnerPublic{Address: types.ArbOwnerPublicAddress} ArbOwnerPublic := insert(MakePrecompile(pgen.ArbOwnerPublicMetaData, ArbOwnerPublicImpl)) - ArbOwnerPublic.methodsByName["GetInfraFeeAccount"].arbosVersion = 5 - ArbOwnerPublic.methodsByName["RectifyChainOwner"].arbosVersion = 11 - ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = 20 - ArbOwnerPublic.methodsByName["GetScheduledUpgrade"].arbosVersion = 20 + ArbOwnerPublic.methodsByName["GetInfraFeeAccount"].arbosVersion = params.ArbosVersion_5 + ArbOwnerPublic.methodsByName["RectifyChainOwner"].arbosVersion = params.ArbosVersion_11 + ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = params.ArbosVersion_20 + ArbOwnerPublic.methodsByName["GetScheduledUpgrade"].arbosVersion = params.ArbosVersion_20 ArbWasmImpl := &ArbWasm{Address: types.ArbWasmAddress} ArbWasm := insert(MakePrecompile(pgen.ArbWasmMetaData, ArbWasmImpl)) @@ -611,11 +606,11 @@ func Precompiles() map[addr]ArbosPrecompile { return ArbOwnerImpl.OwnerActs(context, evm, method, 
owner, data) } _, ArbOwner := MakePrecompile(pgen.ArbOwnerMetaData, ArbOwnerImpl) - ArbOwner.methodsByName["GetInfraFeeAccount"].arbosVersion = 5 - ArbOwner.methodsByName["SetInfraFeeAccount"].arbosVersion = 5 - ArbOwner.methodsByName["ReleaseL1PricerSurplusFunds"].arbosVersion = 10 - ArbOwner.methodsByName["SetChainConfig"].arbosVersion = 11 - ArbOwner.methodsByName["SetBrotliCompressionLevel"].arbosVersion = 20 + ArbOwner.methodsByName["GetInfraFeeAccount"].arbosVersion = params.ArbosVersion_5 + ArbOwner.methodsByName["SetInfraFeeAccount"].arbosVersion = params.ArbosVersion_5 + ArbOwner.methodsByName["ReleaseL1PricerSurplusFunds"].arbosVersion = params.ArbosVersion_10 + ArbOwner.methodsByName["SetChainConfig"].arbosVersion = params.ArbosVersion_11 + ArbOwner.methodsByName["SetBrotliCompressionLevel"].arbosVersion = params.ArbosVersion_20 stylusMethods := []string{ "SetInkPrice", "SetWasmMaxStackDepth", "SetWasmFreePages", "SetWasmPageGas", "SetWasmPageLimit", "SetWasmMinInitGas", "SetWasmInitCostScalar", @@ -756,7 +751,7 @@ func (p *Precompile) Call( reflectArgs = append(reflectArgs, reflect.ValueOf(evm)) reflectArgs = append(reflectArgs, reflect.ValueOf(value)) default: - log.Crit("Unknown state mutability ", method.purity) + panic("Unknown state mutability " + strconv.Itoa(int(method.purity))) } args, err := method.template.Inputs.Unpack(input[4:]) @@ -798,7 +793,7 @@ func (p *Precompile) Call( ) } // nolint:errorlint - if arbosVersion >= 11 || errRet == vm.ErrExecutionReverted { + if arbosVersion >= params.ArbosVersion_11 || errRet == vm.ErrExecutionReverted { return nil, callerCtx.gasLeft, vm.ErrExecutionReverted } // Preserve behavior with old versions which would zero out gas on this type of error diff --git a/precompiles/precompile_test.go b/precompiles/precompile_test.go index c8b8a46b96..183ec1f083 100644 --- a/precompiles/precompile_test.go +++ b/precompiles/precompile_test.go @@ -5,15 +5,12 @@ package precompiles import ( "fmt" - "io" "math/big" - "os" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/storage" @@ -183,20 +180,14 @@ func TestEventCosts(t *testing.T) { } func TestPrecompilesPerArbosVersion(t *testing.T) { - // Set up a logger in case log.Crit is called by Precompiles() - glogger := log.NewGlogHandler( - log.NewTerminalHandler(io.Writer(os.Stderr), false)) - glogger.Verbosity(log.LevelWarn) - log.SetDefault(log.NewLogger(glogger)) - expectedNewMethodsPerArbosVersion := map[uint64]int{ - 0: 89, - 5: 3, - 10: 2, - 11: 4, - 20: 8, - 30: 38, - 31: 1, + 0: 89, + params.ArbosVersion_5: 3, + params.ArbosVersion_10: 2, + params.ArbosVersion_11: 4, + params.ArbosVersion_20: 8, + params.ArbosVersion_30: 38, + params.ArbosVersion_31: 1, } precompiles := Precompiles() diff --git a/precompiles/wrapper.go b/precompiles/wrapper.go index edc079fc5b..028aed755b 100644 --- a/precompiles/wrapper.go +++ b/precompiles/wrapper.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos/arbosState" "github.com/offchainlabs/nitro/arbos/util" @@ -102,7 +103,7 @@ func (wrapper *OwnerPrecompile) Call( } version := arbosState.ArbOSVersion(evm.StateDB) - if !readOnly || version < 11 { + if !readOnly || version < 
params.ArbosVersion_11 { // log that the owner operation succeeded if err := wrapper.emitSuccess(evm, *(*[4]byte)(input[:4]), caller, input); err != nil { log.Error("failed to emit OwnerActs event", "err", err) diff --git a/staker/bold/bold_staker.go b/staker/bold/bold_staker.go index 1a8eed80fa..063f7b9719 100644 --- a/staker/bold/bold_staker.go +++ b/staker/bold/bold_staker.go @@ -244,20 +244,27 @@ func (b *BOLDStaker) Start(ctxIn context.Context) { if err != nil { log.Warn("error updating latest wasm module root", "err", err) } + confirmedMsgCount, confirmedGlobalState, err := b.getLatestState(ctx, true) + if err != nil { + log.Error("staker: error checking latest confirmed", "err", err) + } + agreedMsgCount, agreedGlobalState, err := b.getLatestState(ctx, false) if err != nil { log.Error("staker: error checking latest agreed", "err", err) } + if agreedGlobalState == nil { + // If we don't have a latest agreed global state, we should fall back to + // using the latest confirmed global state. + agreedGlobalState = confirmedGlobalState + agreedMsgCount = confirmedMsgCount + } if agreedGlobalState != nil { for _, notifier := range b.stakedNotifiers { notifier.UpdateLatestStaked(agreedMsgCount, *agreedGlobalState) } } - confirmedMsgCount, confirmedGlobalState, err := b.getLatestState(ctx, true) - if err != nil { - log.Error("staker: error checking latest confirmed", "err", err) - } if confirmedGlobalState != nil { for _, notifier := range b.confirmedNotifiers { diff --git a/staker/legacy/staker.go b/staker/legacy/staker.go index fa74be327f..504e8c8421 100644 --- a/staker/legacy/staker.go +++ b/staker/legacy/staker.go @@ -323,9 +323,6 @@ func NewStaker( return nil, err } stakerLastSuccessfulActionGauge.Update(time.Now().Unix()) - if config().StartValidationFromStaked && blockValidator != nil { - stakedNotifiers = append(stakedNotifiers, blockValidator) - } inactiveValidatedNodes := btree.NewG(2, func(a, b validatedNode) bool { return a.number < b.number || (a.number == b.number && a.hash.Cmp(b.hash) < 0) }) diff --git a/system_tests/arbos_upgrade_test.go b/system_tests/arbos_upgrade_test.go new file mode 100644 index 0000000000..a7103a8585 --- /dev/null +++ b/system_tests/arbos_upgrade_test.go @@ -0,0 +1,271 @@ +// Copyright 2021-2024, Offchain Labs, Inc. 
+// For license information, see https://github.com/OffchainLabs/nitro/blob/master/LICENSE
+
+package arbtest
+
+import (
+	"context"
+	"math/big"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+
+	"github.com/offchainlabs/nitro/arbnode"
+	"github.com/offchainlabs/nitro/arbos/arbosState"
+	"github.com/offchainlabs/nitro/execution/gethexec"
+	"github.com/offchainlabs/nitro/solgen/go/mocksgen"
+	"github.com/offchainlabs/nitro/solgen/go/precompilesgen"
+)
+
+func TestScheduleArbosUpgrade(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+	cleanup := builder.Build(t)
+	defer cleanup()
+
+	auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx)
+
+	arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client)
+	Require(t, err, "could not bind ArbOwnerPublic contract")
+
+	arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client)
+	Require(t, err, "could not bind ArbOwner contract")
+
+	callOpts := &bind.CallOpts{Context: ctx}
+	scheduled, err := arbOwnerPublic.GetScheduledUpgrade(callOpts)
+	Require(t, err, "failed to call GetScheduledUpgrade before scheduling upgrade")
+	if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 {
+		t.Errorf("expected no upgrade to be scheduled, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp)
+	}
+
+	// Schedule a noop upgrade, which should test GetScheduledUpgrade in the same way an already completed upgrade would.
+	tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 1, 1)
+	Require(t, err)
+	_, err = builder.L2.EnsureTxSucceeded(tx)
+	Require(t, err)
+
+	scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts)
+	Require(t, err, "failed to call GetScheduledUpgrade after scheduling noop upgrade")
+	if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 {
+		t.Errorf("expected completed scheduled upgrade to be ignored, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp)
+	}
+
+	// We can't test 11 -> 20 because 11 doesn't have the GetScheduledUpgrade method we want to test.
+	var testVersion uint64 = 100
+	var testTimestamp uint64 = 1 << 62
+	tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, testVersion, testTimestamp)
+	Require(t, err)
+	_, err = builder.L2.EnsureTxSucceeded(tx)
+	Require(t, err)
+
+	scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts)
+	Require(t, err, "failed to call GetScheduledUpgrade after scheduling upgrade")
+	if scheduled.ArbosVersion != testVersion || scheduled.ScheduledForTimestamp != testTimestamp {
+		t.Errorf("expected upgrade to be scheduled for version %v timestamp %v, got version %v timestamp %v", testVersion, testTimestamp, scheduled.ArbosVersion, scheduled.ScheduledForTimestamp)
+	}
+}
+
+func checkArbOSVersion(t *testing.T, testClient *TestClient, expectedVersion uint64, scenario string) {
+	statedb, err := testClient.ExecNode.Backend.ArbInterface().BlockChain().State()
+	Require(t, err, "could not get statedb", scenario)
+	state, err := arbosState.OpenSystemArbosState(statedb, nil, true)
+	Require(t, err, "could not open ArbOS state", scenario)
+	if state.ArbOSVersion() != expectedVersion {
+		t.Errorf("%s: expected ArbOS version %v, got %v", scenario, expectedVersion, state.ArbOSVersion())
+	}
+}
+
+func 
TestArbos11To32UpgradeWithMcopy(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialVersion := uint64(11) + finalVersion := uint64(32) + + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). + WithArbOSVersion(initialVersion) + cleanup := builder.Build(t) + defer cleanup() + seqTestClient := builder.L2 + + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + auth.GasLimit = 32000000 + + // makes Owner a chain owner + arbDebug, err := precompilesgen.NewArbDebug(types.ArbDebugAddress, seqTestClient.Client) + Require(t, err) + tx, err := arbDebug.BecomeChainOwner(&auth) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, seqTestClient.Client, tx) + Require(t, err) + + // deploys test contract + _, tx, contract, err := mocksgen.DeployArbOS11To32UpgradeTest(&auth, seqTestClient.Client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, seqTestClient.Client, tx) + Require(t, err) + + // build replica node + replicaConfig := arbnode.ConfigDefaultL1Test() + replicaConfig.BatchPoster.Enable = false + replicaTestClient, replicaCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: replicaConfig}) + defer replicaCleanup() + + checkArbOSVersion(t, seqTestClient, initialVersion, "initial sequencer") + checkArbOSVersion(t, replicaTestClient, initialVersion, "initial replica") + + // mcopy should fail since arbos 11 doesn't support it + tx, err = contract.Mcopy(&auth) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + if (err == nil) || !strings.Contains(err.Error(), "invalid opcode: MCOPY") { + t.Errorf("expected MCOPY to fail, got %v", err) + } + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + // upgrade arbos to final version + arbOwner, err := precompilesgen.NewArbOwner(types.ArbOwnerAddress, seqTestClient.Client) + Require(t, err) + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, finalVersion, 0) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + // checks upgrade worked + tx, err = contract.Mcopy(&auth) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + checkArbOSVersion(t, seqTestClient, finalVersion, "final sequencer") + checkArbOSVersion(t, replicaTestClient, finalVersion, "final replica") + + // generates more blocks + builder.L2Info.GenerateAccount("User2") + for i := 0; i < 3; i++ { + tx = builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, big.NewInt(1e12), nil) + err = seqTestClient.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + } + + blockNumberSeq, err := seqTestClient.Client.BlockNumber(ctx) + Require(t, err) + blockNumberReplica, err := replicaTestClient.Client.BlockNumber(ctx) + Require(t, err) + if blockNumberSeq != blockNumberReplica { + t.Errorf("expected sequencer and replica to have same block number, got %v and %v", blockNumberSeq, blockNumberReplica) + } + // #nosec G115 + blockNumber := big.NewInt(int64(blockNumberSeq)) + + blockSeq, err := seqTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + blockReplica, err := replicaTestClient.Client.BlockByNumber(ctx, 
blockNumber) + Require(t, err) + if blockSeq.Hash() != blockReplica.Hash() { + t.Errorf("expected sequencer and replica to have same block hash, got %v and %v", blockSeq.Hash(), blockReplica.Hash()) + } +} + +func TestArbos11To32UpgradeWithCalldata(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialVersion := uint64(11) + finalVersion := uint64(32) + + builder := NewNodeBuilder(ctx). + DefaultConfig(t, true). + WithArbOSVersion(initialVersion) + builder.execConfig.TxPreChecker.Strictness = gethexec.TxPreCheckerStrictnessLikelyCompatible + cleanup := builder.Build(t) + defer cleanup() + seqTestClient := builder.L2 + + auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) + auth.GasLimit = 32000000 + + // makes Owner a chain owner + arbDebug, err := precompilesgen.NewArbDebug(types.ArbDebugAddress, seqTestClient.Client) + Require(t, err) + tx, err := arbDebug.BecomeChainOwner(&auth) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, seqTestClient.Client, tx) + Require(t, err) + + // build replica node + replicaConfig := arbnode.ConfigDefaultL1Test() + replicaConfig.BatchPoster.Enable = false + replicaTestClient, replicaCleanup := builder.Build2ndNode(t, &SecondNodeParams{nodeConfig: replicaConfig}) + defer replicaCleanup() + + checkArbOSVersion(t, seqTestClient, initialVersion, "initial sequencer") + checkArbOSVersion(t, replicaTestClient, initialVersion, "initial replica") + + // upgrade arbos to final version + arbOwner, err := precompilesgen.NewArbOwner(types.ArbOwnerAddress, seqTestClient.Client) + Require(t, err) + tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, finalVersion, 0) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + // checks upgrade worked + var data []byte + for i := range 10 { + for range 100 { + data = append(data, byte(i)) + } + } + tx = builder.L2Info.PrepareTx("Owner", "Owner", builder.L2Info.TransferGas, big.NewInt(1e12), data) + err = seqTestClient.Client.SendTransaction(ctx, tx) + Require(t, err) + _, err = seqTestClient.EnsureTxSucceeded(tx) + Require(t, err) + _, err = WaitForTx(ctx, replicaTestClient.Client, tx.Hash(), time.Second*15) + Require(t, err) + + checkArbOSVersion(t, seqTestClient, finalVersion, "final sequencer") + checkArbOSVersion(t, replicaTestClient, finalVersion, "final replica") + + blockNumberSeq, err := seqTestClient.Client.BlockNumber(ctx) + Require(t, err) + blockNumberReplica, err := replicaTestClient.Client.BlockNumber(ctx) + Require(t, err) + if blockNumberSeq != blockNumberReplica { + t.Errorf("expected sequencer and replica to have same block number, got %v and %v", blockNumberSeq, blockNumberReplica) + } + // #nosec G115 + blockNumber := big.NewInt(int64(blockNumberSeq)) + + blockSeq, err := seqTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + blockReplica, err := replicaTestClient.Client.BlockByNumber(ctx, blockNumber) + Require(t, err) + if blockSeq.Hash() != blockReplica.Hash() { + t.Errorf("expected sequencer and replica to have same block hash, got %v and %v", blockSeq.Hash(), blockReplica.Hash()) + } +} diff --git a/system_tests/block_validator_test.go b/system_tests/block_validator_test.go index 4a9e38d25f..d6ae4973ac 100644 --- a/system_tests/block_validator_test.go +++ b/system_tests/block_validator_test.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" 
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbos/l2pricing" @@ -58,7 +59,7 @@ func testBlockValidatorSimple(t *testing.T, opts Options) { chainConfig, l1NodeConfigA, lifecycleManager, _, dasSignerKey := setupConfigWithDAS(t, ctx, opts.dasModeString) defer lifecycleManager.StopAndWaitUntil(time.Second) if opts.workload == upgradeArbOs { - chainConfig.ArbitrumChainParams.InitialArbOSVersion = 10 + chainConfig.ArbitrumChainParams.InitialArbOSVersion = params.ArbosVersion_10 } var delayEvery int diff --git a/system_tests/das_test.go b/system_tests/das_test.go index db0ea12827..c90852fe75 100644 --- a/system_tests/das_test.go +++ b/system_tests/das_test.go @@ -85,6 +85,7 @@ func aggConfigForBackend(backendConfig das.BackendConfig) das.AggregatorConfig { AssumedHonest: 1, Backends: das.BackendConfigList{backendConfig}, MaxStoreChunkBodySize: 512 * 1024, + EnableChunkedStore: true, } } diff --git a/system_tests/debugapi_test.go b/system_tests/debugapi_test.go index 6be79ed4c9..fd1aa746a3 100644 --- a/system_tests/debugapi_test.go +++ b/system_tests/debugapi_test.go @@ -3,6 +3,8 @@ package arbtest import ( "context" "encoding/json" + "fmt" + "math/big" "testing" "github.com/ethereum/go-ethereum/common" @@ -10,10 +12,16 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/gasestimator" "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "github.com/offchainlabs/nitro/arbos/l2pricing" + "github.com/offchainlabs/nitro/arbos/retryables" + "github.com/offchainlabs/nitro/solgen/go/node_interfacegen" "github.com/offchainlabs/nitro/solgen/go/precompilesgen" + "github.com/offchainlabs/nitro/util/arbmath" ) func TestDebugAPI(t *testing.T) { @@ -57,3 +65,230 @@ func TestDebugAPI(t *testing.T) { err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), &tracers.TraceConfig{Tracer: &flatCallTracer}) Require(t, err) } + +type account struct { + Balance *hexutil.Big `json:"balance,omitempty"` + Code []byte `json:"code,omitempty"` + Nonce uint64 `json:"nonce,omitempty"` + Storage map[common.Hash]common.Hash `json:"storage,omitempty"` +} +type prestateTrace struct { + Post map[common.Address]*account `json:"post"` + Pre map[common.Address]*account `json:"pre"` +} + +func TestPrestateTracingSimple(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + builder := NewNodeBuilder(ctx).DefaultConfig(t, true) + cleanup := builder.Build(t) + defer cleanup() + + builder.L2Info.GenerateAccount("User2") + sender := builder.L2Info.GetAddress("Owner") + receiver := builder.L2Info.GetAddress("User2") + ownerOldBalance, err := builder.L2.Client.BalanceAt(ctx, sender, nil) + Require(t, err) + user2OldBalance, err := builder.L2.Client.BalanceAt(ctx, receiver, nil) + Require(t, err) + + value := big.NewInt(1e6) + tx := builder.L2Info.PrepareTx("Owner", "User2", builder.L2Info.TransferGas, value, nil) + Require(t, builder.L2.Client.SendTransaction(ctx, tx)) + _, err = builder.L2.EnsureTxSucceeded(tx) + Require(t, err) + + l2rpc := builder.L2.Stack.Attach() + + var result prestateTrace + traceConfig := map[string]interface{}{ + "tracer": "prestateTracer", + "tracerConfig": map[string]interface{}{ + "diffMode": true, + }, + } 
+	err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", tx.Hash(), traceConfig)
+	Require(t, err)
+
+	if !arbmath.BigEquals(result.Pre[sender].Balance.ToInt(), ownerOldBalance) {
+		Fatal(t, "Unexpected initial balance of sender")
+	}
+	if !arbmath.BigEquals(result.Pre[receiver].Balance.ToInt(), user2OldBalance) {
+		Fatal(t, "Unexpected initial balance of receiver")
+	}
+	if !arbmath.BigEquals(result.Post[sender].Balance.ToInt(), arbmath.BigSub(ownerOldBalance, value)) {
+		Fatal(t, "Unexpected final balance of sender")
+	}
+	if !arbmath.BigEquals(result.Post[receiver].Balance.ToInt(), value) {
+		Fatal(t, "Unexpected final balance of receiver")
+	}
+	if result.Post[sender].Nonce != result.Pre[sender].Nonce+1 {
+		Fatal(t, "sender nonce increment wasn't registered")
+	}
+	if result.Post[receiver].Nonce != result.Pre[receiver].Nonce {
+		Fatal(t, "receiver nonce shouldn't change")
+	}
+}
+
+func TestPrestateTracingComplex(t *testing.T) {
+	builder, delayedInbox, lookupL2Tx, ctx, teardown := retryableSetup(t)
+	defer teardown()
+
+	// Test prestate tracing of an ArbitrumDepositTx type tx
+	faucetAddr := builder.L1Info.GetAddress("Faucet")
+	oldBalance, err := builder.L2.Client.BalanceAt(ctx, faucetAddr, nil)
+	Require(t, err)
+
+	txOpts := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx)
+	txOpts.Value = big.NewInt(13)
+
+	l1tx, err := delayedInbox.DepositEth439370b1(&txOpts)
+	Require(t, err)
+
+	l1Receipt, err := builder.L1.EnsureTxSucceeded(l1tx)
+	Require(t, err)
+	if l1Receipt.Status != types.ReceiptStatusSuccessful {
+		t.Errorf("Got transaction status: %v, want: %v", l1Receipt.Status, types.ReceiptStatusSuccessful)
+	}
+	waitForL1DelayBlocks(t, builder)
+
+	l2Tx := lookupL2Tx(l1Receipt)
+	l2Receipt, err := builder.L2.EnsureTxSucceeded(l2Tx)
+	Require(t, err)
+	newBalance, err := builder.L2.Client.BalanceAt(ctx, faucetAddr, l2Receipt.BlockNumber)
+	Require(t, err)
+	if got := new(big.Int); got.Sub(newBalance, oldBalance).Cmp(txOpts.Value) != 0 {
+		t.Errorf("Got transferred: %v, want: %v", got, txOpts.Value)
+	}
+
+	l2rpc := builder.L2.Stack.Attach()
+	var result prestateTrace
+	traceConfig := map[string]interface{}{
+		"tracer": "prestateTracer",
+		"tracerConfig": map[string]interface{}{
+			"diffMode": true,
+		},
+	}
+	err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", l2Tx.Hash(), traceConfig)
+	Require(t, err)
+
+	if _, ok := result.Pre[faucetAddr]; !ok {
+		Fatal(t, "Faucet account not found in the result of prestate tracer")
+	}
+	// The nonce shouldn't appear in the Post map of a diffMode trace, so it defaults to 0 here
+	if l2Tx.SkipAccountChecks() && result.Post[faucetAddr].Nonce != 0 {
+		Fatal(t, "Faucet account's nonce should remain unchanged")
+	}
+	if !arbmath.BigEquals(result.Pre[faucetAddr].Balance.ToInt(), oldBalance) {
+		Fatal(t, "Unexpected initial balance of Faucet")
+	}
+	if !arbmath.BigEquals(result.Post[faucetAddr].Balance.ToInt(), arbmath.BigAdd(oldBalance, txOpts.Value)) {
+		Fatal(t, "Unexpected final balance of Faucet")
+	}
+
+	// Test prestate tracing of an ArbitrumSubmitRetryableTx type tx
+	user2Address := builder.L2Info.GetAddress("User2")
+	beneficiaryAddress := builder.L2Info.GetAddress("Beneficiary")
+
+	deposit := arbmath.BigMul(big.NewInt(1e12), big.NewInt(1e12))
+	callValue := big.NewInt(1e6)
+
+	nodeInterface, err := node_interfacegen.NewNodeInterface(types.NodeInterfaceAddress, builder.L2.Client)
+	Require(t, err, "failed to bind NodeInterface")
+
+	// estimate the gas needed to auto redeem the retryable
+	usertxoptsL2 := 
builder.L2Info.GetDefaultTransactOpts("Faucet", ctx)
+	usertxoptsL2.NoSend = true
+	usertxoptsL2.GasMargin = 0
+	tx, err := nodeInterface.EstimateRetryableTicket(
+		&usertxoptsL2,
+		usertxoptsL2.From,
+		deposit,
+		user2Address,
+		callValue,
+		beneficiaryAddress,
+		beneficiaryAddress,
+		[]byte{0x32, 0x42, 0x32, 0x88}, // increase the cost to beyond that of params.TxGas
+	)
+	Require(t, err, "failed to estimate retryable submission")
+	estimate := tx.Gas()
+	expectedEstimate := params.TxGas + params.TxDataNonZeroGasEIP2028*4
+	if float64(estimate) > float64(expectedEstimate)*(1+gasestimator.EstimateGasErrorRatio) {
+		t.Errorf("estimated retryable ticket at %v gas but expected %v, with error margin of %v",
+			estimate,
+			expectedEstimate,
+			gasestimator.EstimateGasErrorRatio,
+		)
+	}
+
+	// submit & auto redeem the retryable using the gas estimate
+	usertxoptsL1 := builder.L1Info.GetDefaultTransactOpts("Faucet", ctx)
+	usertxoptsL1.Value = deposit
+	l1tx, err = delayedInbox.CreateRetryableTicket(
+		&usertxoptsL1,
+		user2Address,
+		callValue,
+		big.NewInt(1e16),
+		beneficiaryAddress,
+		beneficiaryAddress,
+		arbmath.UintToBig(estimate),
+		big.NewInt(l2pricing.InitialBaseFeeWei*2),
+		[]byte{0x32, 0x42, 0x32, 0x88},
+	)
+	Require(t, err)
+
+	l1Receipt, err = builder.L1.EnsureTxSucceeded(l1tx)
+	Require(t, err)
+	if l1Receipt.Status != types.ReceiptStatusSuccessful {
+		Fatal(t, "l1Receipt indicated failure")
+	}
+
+	waitForL1DelayBlocks(t, builder)
+
+	l2Tx = lookupL2Tx(l1Receipt)
+	receipt, err := builder.L2.EnsureTxSucceeded(l2Tx)
+	Require(t, err)
+	if receipt.Status != types.ReceiptStatusSuccessful {
+		Fatal(t)
+	}
+
+	l2balance, err := builder.L2.Client.BalanceAt(ctx, builder.L2Info.GetAddress("User2"), nil)
+	Require(t, err)
+	if !arbmath.BigEquals(l2balance, callValue) {
+		Fatal(t, "Unexpected balance:", l2balance)
+	}
+
+	ticketId := receipt.Logs[0].Topics[1]
+	firstRetryTxId := receipt.Logs[1].Topics[2]
+	fmt.Println("submitretryable txid ", ticketId)
+	fmt.Println("auto redeem txid ", firstRetryTxId)
+
+	// Trace ArbitrumSubmitRetryableTx
+	result = prestateTrace{}
+	err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", l2Tx.Hash(), traceConfig)
+	Require(t, err)
+
+	escrowAddr := retryables.RetryableEscrowAddress(ticketId)
+	if _, ok := result.Pre[escrowAddr]; !ok {
+		Fatal(t, "Escrow account not found in the result of prestate tracer for an ArbitrumSubmitRetryableTx transaction")
+	}
+
+	if !arbmath.BigEquals(result.Pre[escrowAddr].Balance.ToInt(), common.Big0) {
+		Fatal(t, "Unexpected initial balance of Escrow")
+	}
+	if !arbmath.BigEquals(result.Post[escrowAddr].Balance.ToInt(), callValue) {
+		Fatal(t, "Unexpected final balance of Escrow")
+	}
+
+	// Trace ArbitrumRetryTx
+	result = prestateTrace{}
+	err = l2rpc.CallContext(ctx, &result, "debug_traceTransaction", firstRetryTxId, traceConfig)
+	Require(t, err)
+
+	if !arbmath.BigEquals(result.Pre[user2Address].Balance.ToInt(), common.Big0) {
+		Fatal(t, "Unexpected initial balance of User2")
+	}
+	if !arbmath.BigEquals(result.Post[user2Address].Balance.ToInt(), callValue) {
+		Fatal(t, "Unexpected final balance of User2")
+	}
+}
diff --git a/system_tests/estimation_test.go b/system_tests/estimation_test.go
index e489b1864e..37e1efe8c5 100644
--- a/system_tests/estimation_test.go
+++ b/system_tests/estimation_test.go
@@ -162,7 +162,7 @@ func TestDifficultyForArbOSTen(t *testing.T) {
 	defer cancel()
 
 	builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
-	builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = 10
+	
builder.chainConfig.ArbitrumChainParams.InitialArbOSVersion = params.ArbosVersion_10 cleanup := builder.Build(t) defer cleanup() diff --git a/system_tests/fees_test.go b/system_tests/fees_test.go index 76de23e2cb..5540728df8 100644 --- a/system_tests/fees_test.go +++ b/system_tests/fees_test.go @@ -89,10 +89,10 @@ func TestSequencerFeePaid(t *testing.T) { feePaidForL2 := arbmath.BigMulByUint(gasPrice, gasUsedForL2) tipPaidToNet := arbmath.BigMulByUint(tipCap, receipt.GasUsedForL1) gotTip := arbmath.BigEquals(networkRevenue, arbmath.BigAdd(feePaidForL2, tipPaidToNet)) - if !gotTip && version == 9 { + if !gotTip && version == params.ArbosVersion_9 { Fatal(t, "network didn't receive expected payment", networkRevenue, feePaidForL2, tipPaidToNet) } - if gotTip && version != 9 { + if gotTip && version != params.ArbosVersion_9 { Fatal(t, "tips are somehow enabled") } @@ -110,7 +110,7 @@ func TestSequencerFeePaid(t *testing.T) { return networkRevenue, tipPaidToNet } - if version != 9 { + if version != params.ArbosVersion_9 { testFees(3) return } diff --git a/system_tests/precompile_test.go b/system_tests/precompile_test.go index 78f34df6c7..5bc6315086 100644 --- a/system_tests/precompile_test.go +++ b/system_tests/precompile_test.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/offchainlabs/nitro/arbos" "github.com/offchainlabs/nitro/arbos/l1pricing" @@ -27,7 +28,7 @@ func TestPurePrecompileMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - arbosVersion := uint64(31) + arbosVersion := params.ArbosVersion_31 builder := NewNodeBuilder(ctx). DefaultConfig(t, false). WithArbOSVersion(arbosVersion) @@ -504,57 +505,6 @@ func TestGetBrotliCompressionLevel(t *testing.T) { } } -func TestScheduleArbosUpgrade(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - builder := NewNodeBuilder(ctx).DefaultConfig(t, false) - cleanup := builder.Build(t) - defer cleanup() - - auth := builder.L2Info.GetDefaultTransactOpts("Owner", ctx) - - arbOwnerPublic, err := precompilesgen.NewArbOwnerPublic(common.HexToAddress("0x6b"), builder.L2.Client) - Require(t, err, "could not bind ArbOwner contract") - - arbOwner, err := precompilesgen.NewArbOwner(common.HexToAddress("0x70"), builder.L2.Client) - Require(t, err, "could not bind ArbOwner contract") - - callOpts := &bind.CallOpts{Context: ctx} - scheduled, err := arbOwnerPublic.GetScheduledUpgrade(callOpts) - Require(t, err, "failed to call GetScheduledUpgrade before scheduling upgrade") - if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 { - t.Errorf("expected no upgrade to be scheduled, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp) - } - - // Schedule a noop upgrade, which should test GetScheduledUpgrade in the same way an already completed upgrade would. 
-	tx, err := arbOwner.ScheduleArbOSUpgrade(&auth, 1, 1)
-	Require(t, err)
-	_, err = builder.L2.EnsureTxSucceeded(tx)
-	Require(t, err)
-
-	scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts)
-	Require(t, err, "failed to call GetScheduledUpgrade after scheduling noop upgrade")
-	if scheduled.ArbosVersion != 0 || scheduled.ScheduledForTimestamp != 0 {
-		t.Errorf("expected completed scheduled upgrade to be ignored, got version %v timestamp %v", scheduled.ArbosVersion, scheduled.ScheduledForTimestamp)
-	}
-
-	// TODO: Once we have an ArbOS 30, test a real upgrade with it
-	// We can't test 11 -> 20 because 11 doesn't have the GetScheduledUpgrade method we want to test
-	var testVersion uint64 = 100
-	var testTimestamp uint64 = 1 << 62
-	tx, err = arbOwner.ScheduleArbOSUpgrade(&auth, 100, 1<<62)
-	Require(t, err)
-	_, err = builder.L2.EnsureTxSucceeded(tx)
-	Require(t, err)
-
-	scheduled, err = arbOwnerPublic.GetScheduledUpgrade(callOpts)
-	Require(t, err, "failed to call GetScheduledUpgrade after scheduling upgrade")
-	if scheduled.ArbosVersion != testVersion || scheduled.ScheduledForTimestamp != testTimestamp {
-		t.Errorf("expected upgrade to be scheduled for version %v timestamp %v, got version %v timestamp %v", testVersion, testTimestamp, scheduled.ArbosVersion, scheduled.ScheduledForTimestamp)
-	}
-}
-
 func TestArbStatistics(t *testing.T) {
 	t.Parallel()
 
diff --git a/system_tests/retryable_test.go b/system_tests/retryable_test.go
index 55d26c8372..49bba81374 100644
--- a/system_tests/retryable_test.go
+++ b/system_tests/retryable_test.go
@@ -316,7 +316,7 @@ func testSubmitRetryableEmptyEscrow(t *testing.T, arbosVersion uint64) {
 	state, err := builder.L2.ExecNode.ArbInterface.BlockChain().State()
 	Require(t, err)
 	escrowExists := state.Exist(escrowAccount)
-	if escrowExists != (arbosVersion < 30) {
+	if escrowExists != (arbosVersion < params.ArbosVersion_30) {
 		Fatal(t, "Escrow account existence", escrowExists, "doesn't correspond to ArbOS version", arbosVersion)
 	}
 }
diff --git a/system_tests/seq_filter_test.go b/system_tests/seq_filter_test.go
new file mode 100644
index 0000000000..fdd0c96d13
--- /dev/null
+++ b/system_tests/seq_filter_test.go
@@ -0,0 +1,146 @@
+package arbtest
+
+import (
+	"context"
+	"math/big"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/arbitrum_types"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/params"
+
+	"github.com/offchainlabs/nitro/arbos"
+	"github.com/offchainlabs/nitro/arbos/arbosState"
+	"github.com/offchainlabs/nitro/arbos/arbostypes"
+	"github.com/offchainlabs/nitro/arbos/l1pricing"
+	"github.com/offchainlabs/nitro/util/arbmath"
+)
+
+func TestSequencerTxFilter(t *testing.T) {
+	t.Parallel()
+
+	builder, header, txes, hooks, cleanup := setupSequencerFilterTest(t, false)
+	defer cleanup()
+
+	block, err := builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes, hooks)
+	Require(t, err) // There shouldn't be any error in block generation
+	if block == nil {
+		t.Fatal("block should be generated as second tx should pass")
+	}
+	if len(block.Transactions()) != 2 {
+		t.Fatalf("expecting two txs, found: %d", len(block.Transactions()))
+	}
+	if block.Transactions()[1].Hash() != txes[1].Hash() {
+		t.Fatal("tx hash mismatch, expecting second tx to be present in the block")
+	}
+	if len(hooks.TxErrors) != 2 {
+		t.Fatalf("expected 2 txErrors in hooks, found: %d", 
len(hooks.TxErrors))
+	}
+	// err itself is nil here (the block was sequenced); the filter error is
+	// recorded per-tx in hooks.TxErrors
+	if hooks.TxErrors[0].Error() != state.ErrArbTxFilter.Error() {
+		t.Fatalf("expected ErrArbTxFilter, found: %s", hooks.TxErrors[0].Error())
+	}
+	if hooks.TxErrors[1] != nil {
+		t.Fatalf("found a non-nil error for second transaction: %v", hooks.TxErrors[1])
+	}
+}
+
+func TestSequencerBlockFilterReject(t *testing.T) {
+	t.Parallel()
+
+	builder, header, txes, hooks, cleanup := setupSequencerFilterTest(t, true)
+	defer cleanup()
+
+	block, err := builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes, hooks)
+	if block != nil {
+		t.Fatal("block shouldn't be generated when all txes have failed")
+	}
+	if err == nil {
+		t.Fatal("expected ErrArbTxFilter but found nil")
+	}
+	if err.Error() != state.ErrArbTxFilter.Error() {
+		t.Fatalf("expected ErrArbTxFilter, found: %s", err.Error())
+	}
+}
+
+func TestSequencerBlockFilterAccept(t *testing.T) {
+	t.Parallel()
+
+	builder, header, txes, hooks, cleanup := setupSequencerFilterTest(t, true)
+	defer cleanup()
+
+	block, err := builder.L2.ExecNode.ExecEngine.SequenceTransactions(header, txes[1:], hooks)
+	Require(t, err)
+	if block == nil {
+		t.Fatal("block should be generated as the tx should pass")
+	}
+	if len(block.Transactions()) != 2 {
+		t.Fatalf("expecting two txs, found: %d", len(block.Transactions()))
+	}
+	if block.Transactions()[1].Hash() != txes[1].Hash() {
+		t.Fatal("tx hash mismatch, expecting second tx to be present in the block")
+	}
+}
+
+func setupSequencerFilterTest(t *testing.T, isBlockFilter bool) (*NodeBuilder, *arbostypes.L1IncomingMessageHeader, types.Transactions, *arbos.SequencingHooks, func()) {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	builder := NewNodeBuilder(ctx).DefaultConfig(t, false)
+	builder.isSequencer = true
+	builderCleanup := builder.Build(t)
+
+	builder.L2Info.GenerateAccount("User")
+	var latestL2 uint64
+	var err error
+	for i := 0; latestL2 < 3; i++ {
+		_, _ = builder.L2.TransferBalance(t, "Owner", "User", big.NewInt(1e18), builder.L2Info)
+		latestL2, err = builder.L2.Client.BlockNumber(ctx)
+		Require(t, err)
+	}
+
+	header := &arbostypes.L1IncomingMessageHeader{
+		Kind:        arbostypes.L1MessageType_L2Message,
+		Poster:      l1pricing.BatchPosterAddress,
+		BlockNumber: 1,
+		Timestamp:   arbmath.SaturatingUCast[uint64](time.Now().Unix()),
+		RequestId:   nil,
+		L1BaseFee:   nil,
+	}
+
+	var txes types.Transactions
+	txes = append(txes, builder.L2Info.PrepareTx("Owner", "User", builder.L2Info.TransferGas, big.NewInt(1e12), []byte{1, 2, 3}))
+	txes = append(txes, builder.L2Info.PrepareTx("User", "Owner", builder.L2Info.TransferGas, big.NewInt(1e12), nil))
+
+	hooks := arbos.NoopSequencingHooks()
+	if isBlockFilter {
+		hooks.BlockFilter = func(_ *types.Header, _ *state.StateDB, txes types.Transactions, _ types.Receipts) error {
+			if len(txes[1].Data()) > 0 {
+				return state.ErrArbTxFilter
+			}
+			return nil
+		}
+	} else {
+		hooks.PreTxFilter = func(_ *params.ChainConfig, _ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ *arbitrum_types.ConditionalOptions, _ common.Address, _ *arbos.L1Info) error {
+			if len(tx.Data()) > 0 {
+				statedb.FilterTx()
+			}
+			return nil
+		}
+		hooks.PostTxFilter = func(_ *types.Header, statedb *state.StateDB, _ *arbosState.ArbosState, tx *types.Transaction, _ common.Address, _ uint64, _ *core.ExecutionResult) error {
+			if statedb.IsTxFiltered() {
+				return state.ErrArbTxFilter
+			}
+			return nil
+		}
+	}
+
+	cleanup := func() {
+		builderCleanup()
+		cancel()
+	}
+
+	return builder, header, txes, hooks, cleanup
+}
diff --git 
a/system_tests/seqinbox_test.go b/system_tests/seqinbox_test.go
index a9f66b0e2f..e0da2d4f3f 100644
--- a/system_tests/seqinbox_test.go
+++ b/system_tests/seqinbox_test.go
@@ -139,7 +139,6 @@ func testSequencerInboxReaderImpl(t *testing.T, validator bool) {
 	defer cancel()
 
 	builder := NewNodeBuilder(ctx).DefaultConfig(t, true)
-	builder.nodeConfig.InboxReader.HardReorg = true
 	if validator {
 		builder.nodeConfig.BlockValidator.Enable = true
 	}
diff --git a/system_tests/staker_test.go b/system_tests/staker_test.go
index 69645d8878..55c13d664d 100644
--- a/system_tests/staker_test.go
+++ b/system_tests/staker_test.go
@@ -504,7 +504,7 @@ func TestGetValidatorWalletContractWithDataposterOnlyUsedToCreateValidatorWallet
 		parentChainID,
 	)
 	if err != nil {
-		log.Crit("error creating data poster to create validator wallet contract", "err", err)
+		Fatal(t, "error creating data poster to create validator wallet contract", "err", err)
 	}
 	getExtraGas := func() uint64 { return builder.nodeConfig.Staker.ExtraGas }
diff --git a/system_tests/transfer_test.go b/system_tests/transfer_test.go
index a49e059351..c221ecc137 100644
--- a/system_tests/transfer_test.go
+++ b/system_tests/transfer_test.go
@@ -12,6 +12,7 @@ import (
 
 	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/params"
 )
 
 func TestTransfer(t *testing.T) {
@@ -51,12 +52,12 @@ func TestP256Verify(t *testing.T) {
 	}{
 		{
 			desc:           "p256 should not be enabled on arbOS 20",
-			initialVersion: 20,
+			initialVersion: params.ArbosVersion_20,
 			want:           nil,
 		},
 		{
 			desc:           "p256 should be enabled on arbOS 30",
-			initialVersion: 30,
+			initialVersion: params.ArbosVersion_30,
 			want:           common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"),
 		},
 	} {